diff --git a/hack/update-vendor.sh b/hack/update-vendor.sh index 65ae31090..2dd6dcd97 100755 --- a/hack/update-vendor.sh +++ b/hack/update-vendor.sh @@ -23,5 +23,10 @@ set -o pipefail # TODO(random-liu): Remove this after #106 is resolved. ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/.. cd ${ROOT} +echo "Replace invalid imports..." find vendor/ -name *.go | xargs sed -i 's/"github.com\/Sirupsen\/logrus"/"github.com\/sirupsen\/logrus"/g' + +echo "Sort vendor.conf..." +sort vendor.conf -o vendor.conf + echo "Please commit the change made by this file..." diff --git a/vendor.conf b/vendor.conf index c8e88b2d8..37ec7557e 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,7 +1,6 @@ -github.com/Microsoft/go-winio v0.4.4 github.com/blang/semver v3.1.0 github.com/boltdb/bolt v1.3.0-58-ge9cf4fa -github.com/containerd/containerd 2386062ce152d6f158d22be5991fe11c7cf67535 +github.com/containerd/containerd 2386062ce152d6f158d22be5991fe11c7cf67535 github.com/containerd/continuity 86cec1535a968310e7532819f699ff2830ed7463 github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 github.com/containernetworking/cni v0.4.0 @@ -9,13 +8,24 @@ github.com/davecgh/go-spew v1.1.0 github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621 github.com/docker/docker v1.13.1 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 +github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528 +github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46 github.com/fsnotify/fsnotify 7d7316ed6e1ed2de075aab8dfc76de5d158d66e1 +github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8 github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55 +github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c +github.com/go-openapi/jsonpointer 46af16f9f7b149af66e5d1bd010e3574dc06de98 +github.com/go-openapi/jsonreference 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 +github.com/go-openapi/spec 6aced65f8501fe1217321abf0749d354824ba2ff +github.com/go-openapi/swag 1d0bd113de87027671077d3c71eb3ac5d7dbba72 github.com/jpillora/backoff 06c7a16c845dc8e0bf575fafeeca0f5462f5eb4d +github.com/juju/ratelimit 5b9ff866471762aa2ab2dced63c9fb6f53921342 github.com/kubernetes-incubator/cri-o v0.3 +github.com/mailru/easyjson d5b7844b561a7bc640052f1b935f7b800330d7e0 +github.com/Microsoft/go-winio v0.4.4 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13 github.com/opencontainers/runc e775f0fba3ea329b8b766451c892c41a3d49594d @@ -23,14 +33,22 @@ github.com/opencontainers/runtime-spec v1.0.0 github.com/opencontainers/runtime-tools e29f3ca4eb806a582ee1a1864c7b0563bd64c19b github.com/pkg/errors v0.8.0 github.com/pmezard/go-difflib v1.0.0 +github.com/PuerkitoBio/purell v1.0.0 +github.com/PuerkitoBio/urlesc 5bd2802263f21d8788851d5305584c82a5c75d7e github.com/sirupsen/logrus v1.0.0 github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7 github.com/stretchr/testify v1.1.4 github.com/syndtr/gocapability e7cb7fa329f456b3855136a2642b197bad7366ba +github.com/ugorji/go ded73eae5db7e7a0ef6f55aace87a2873c5d2b74 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f 
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 google.golang.org/grpc v1.3.0 +gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77 +k8s.io/apimachinery 47e2f36401b7a66b6d003f3df4b58f6769ccc34b +k8s.io/apiserver 9ae8a89e835edc63f714d0aadaa2101e74d1498d +k8s.io/client-go b131aa47b5c01ae1c6edf8445bc9d125f1f2d38f k8s.io/kubernetes d779e9c9561b732adf06263c5424889e7564fdbd diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE new file mode 100644 index 000000000..4b9986dea --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2012, Martin Angers +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md new file mode 100644 index 000000000..a78a3df65 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/README.md @@ -0,0 +1,185 @@ +# Purell + +Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... + +Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. + +[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell) + +## Install + +`go get github.com/PuerkitoBio/purell` + +## Changelog + +* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). +* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). +* **v0.2.0** : Add benchmarks, Attempt IDN support. +* **v0.1.0** : Initial release. 
+ +## Examples + +From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): + +```go +package purell + +import ( + "fmt" + "net/url" +) + +func ExampleNormalizeURLString() { + if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", + FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { + panic(err) + } else { + fmt.Print(normalized) + } + // Output: http://somewebsite.com:80/Amazing%3F/url/ +} + +func ExampleMustNormalizeURLString() { + normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", + FlagsUnsafeGreedy) + fmt.Print(normalized) + + // Output: http://somewebsite.com/Amazing%FA/url +} + +func ExampleNormalizeURL() { + if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { + panic(err) + } else { + normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) + fmt.Print(normalized) + } + + // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 +} +``` + +## API + +As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: + +```go +const ( + // Safe normalizations + FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 + FlagLowercaseHost // http://HOST -> http://host + FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF + FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA + FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ + FlagRemoveDefaultPort // http://host:80 -> http://host + FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path + + // Usually safe normalizations + FlagRemoveTrailingSlash // http://host/path/ -> http://host/path + FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) + FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c + + // Unsafe normalizations + FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ + FlagRemoveFragment // http://host/path#fragment -> http://host/path + FlagForceHTTP // https://host -> http://host + FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b + FlagRemoveWWW // http://www.host/ -> http://host/ + FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) + FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 + + // Normalizations not in the wikipedia article, required to cover tests cases + // submitted by jehiah + FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 + FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 + FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 + FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path + FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path + + // Convenience set of safe normalizations + FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator + + // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, + // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". + + // Convenience set of usually safe normalizations (includes FlagsSafe) + FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments + FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments + + // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) + FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery + FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery + + // Convenience set of all available flags + FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator + FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator +) +``` + +For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. + +The [full godoc reference is available on gopkgdoc][godoc]. 
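
As a minimal sketch of the flag composition described above (assuming the import path `github.com/PuerkitoBio/purell` used throughout this package), a custom set can be built by OR-ing flags into one of the convenience sets and masking out unwanted ones with `&^`:

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Start from the safe set and add two of the stronger normalizations:
	// dot-segment removal and query sorting.
	flags := purell.FlagsSafe | purell.FlagRemoveDotSegments | purell.FlagSortQuery
	// Mask out default-port removal with the AND NOT operator, so ":80" is kept.
	flags &^= purell.FlagRemoveDefaultPort

	normalized, err := purell.NormalizeURLString("http://example.com:80/a/./b/../c?b=2&a=1", flags)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// Should print something like: http://example.com:80/a/c?a=1&b=2
	fmt.Println(normalized)
}
```

Because `NormalizationFlags` is a plain bit mask, the same `|`/`&^` composition works with any of the convenience sets listed above.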
+ +Some things to note: + +* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. + +* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): + - %24 -> $ + - %26 -> & + - %2B-%3B -> +,-./0123456789:; + - %3D -> = + - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ + - %5F -> _ + - %61-%7A -> abcdefghijklmnopqrstuvwxyz + - %7E -> ~ + + +* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). + +* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. + +* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. + +### Safe vs Usually Safe vs Unsafe + +Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. + +Consider the following URL: + +`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` + +Normalizing with the `FlagsSafe` gives: + +`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` + +With the `FlagsUsuallySafeGreedy`: + +`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` + +And with `FlagsUnsafeGreedy`: + +`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` + +## TODOs + +* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. + +## Thanks / Contributions + +@rogpeppe +@jehiah +@opennota +@pchristopher1275 +@zenovich + +## License + +The [BSD 3-Clause license][bsd]. + +[bsd]: http://opensource.org/licenses/BSD-3-Clause +[wiki]: http://en.wikipedia.org/wiki/URL_normalization +[rfc]: http://tools.ietf.org/html/rfc3986#section-6 +[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell +[pr5]: https://github.com/PuerkitoBio/purell/pull/5 +[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go new file mode 100644 index 000000000..b79da64b3 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/purell.go @@ -0,0 +1,375 @@ +/* +Package purell offers URL normalization as described on the wikipedia page: +http://en.wikipedia.org/wiki/URL_normalization +*/ +package purell + +import ( + "bytes" + "fmt" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/PuerkitoBio/urlesc" + "golang.org/x/net/idna" + "golang.org/x/text/secure/precis" + "golang.org/x/text/unicode/norm" +) + +// A set of normalization flags determines how a URL will +// be normalized. 
+type NormalizationFlags uint + +const ( + // Safe normalizations + FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 + FlagLowercaseHost // http://HOST -> http://host + FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF + FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA + FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ + FlagRemoveDefaultPort // http://host:80 -> http://host + FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path + + // Usually safe normalizations + FlagRemoveTrailingSlash // http://host/path/ -> http://host/path + FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) + FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c + + // Unsafe normalizations + FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ + FlagRemoveFragment // http://host/path#fragment -> http://host/path + FlagForceHTTP // https://host -> http://host + FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b + FlagRemoveWWW // http://www.host/ -> http://host/ + FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) + FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 + + // Normalizations not in the wikipedia article, required to cover tests cases + // submitted by jehiah + FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 + FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 + FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 + FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path + FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path + + // Convenience set of safe normalizations + FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator + + // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, + // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
+ + // Convenience set of usually safe normalizations (includes FlagsSafe) + FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments + FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments + + // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) + FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery + FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery + + // Convenience set of all available flags + FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator + FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator +) + +const ( + defaultHttpPort = ":80" + defaultHttpsPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) +var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) +var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) +var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) +var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) +var rxEmptyPort = regexp.MustCompile(`:+$`) + +// Map of flags to implementation function. +// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically +// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. + +// Since maps have undefined traversing order, make a slice of ordered keys +var flagsOrder = []NormalizationFlags{ + FlagLowercaseScheme, + FlagLowercaseHost, + FlagRemoveDefaultPort, + FlagRemoveDirectoryIndex, + FlagRemoveDotSegments, + FlagRemoveFragment, + FlagForceHTTP, // Must be after remove default port (because https=443/http=80) + FlagRemoveDuplicateSlashes, + FlagRemoveWWW, + FlagAddWWW, + FlagSortQuery, + FlagDecodeDWORDHost, + FlagDecodeOctalHost, + FlagDecodeHexHost, + FlagRemoveUnnecessaryHostDots, + FlagRemoveEmptyPortSeparator, + FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last + FlagAddTrailingSlash, +} + +// ... 
and then the map, where order is unimportant +var flags = map[NormalizationFlags]func(*url.URL){ + FlagLowercaseScheme: lowercaseScheme, + FlagLowercaseHost: lowercaseHost, + FlagRemoveDefaultPort: removeDefaultPort, + FlagRemoveDirectoryIndex: removeDirectoryIndex, + FlagRemoveDotSegments: removeDotSegments, + FlagRemoveFragment: removeFragment, + FlagForceHTTP: forceHTTP, + FlagRemoveDuplicateSlashes: removeDuplicateSlashes, + FlagRemoveWWW: removeWWW, + FlagAddWWW: addWWW, + FlagSortQuery: sortQuery, + FlagDecodeDWORDHost: decodeDWORDHost, + FlagDecodeOctalHost: decodeOctalHost, + FlagDecodeHexHost: decodeHexHost, + FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, + FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, + FlagRemoveTrailingSlash: removeTrailingSlash, + FlagAddTrailingSlash: addTrailingSlash, +} + +// MustNormalizeURLString returns the normalized string, and panics if an error occurs. +// It takes an URL string as input, as well as the normalization flags. +func MustNormalizeURLString(u string, f NormalizationFlags) string { + result, e := NormalizeURLString(u, f) + if e != nil { + panic(e) + } + return result +} + +// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. +// It takes an URL string as input, as well as the normalization flags. +func NormalizeURLString(u string, f NormalizationFlags) (string, error) { + if parsed, e := url.Parse(u); e != nil { + return "", e + } else { + options := make([]precis.Option, 1, 3) + options[0] = precis.IgnoreCase + if f&FlagLowercaseHost == FlagLowercaseHost { + options = append(options, precis.FoldCase()) + } + options = append(options, precis.Norm(norm.NFC)) + profile := precis.NewFreeform(options...) + if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil { + return "", e + } + return NormalizeURL(parsed, f), nil + } + panic("Unreachable code.") +} + +// NormalizeURL returns the normalized string. +// It takes a parsed URL object as input, as well as the normalization flags. +func NormalizeURL(u *url.URL, f NormalizationFlags) string { + for _, k := range flagsOrder { + if f&k == k { + flags[k](u) + } + } + return urlesc.Escape(u) +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + return "" + } + return val + }) + } +} + +func removeTrailingSlash(u *url.URL) { + if l := len(u.Path); l > 0 { + if strings.HasSuffix(u.Path, "/") { + u.Path = u.Path[:l-1] + } + } else if l = len(u.Host); l > 0 { + if strings.HasSuffix(u.Host, "/") { + u.Host = u.Host[:l-1] + } + } +} + +func addTrailingSlash(u *url.URL) { + if l := len(u.Path); l > 0 { + if !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + } else if l = len(u.Host); l > 0 { + if !strings.HasSuffix(u.Host, "/") { + u.Host += "/" + } + } +} + +func removeDotSegments(u *url.URL) { + if len(u.Path) > 0 { + var dotFree []string + var lastIsDot bool + + sections := strings.Split(u.Path, "/") + for _, s := range sections { + if s == ".." { + if len(dotFree) > 0 { + dotFree = dotFree[:len(dotFree)-1] + } + } else if s != "." 
{ + dotFree = append(dotFree, s) + } + lastIsDot = (s == "." || s == "..") + } + // Special case if host does not end with / and new path does not begin with / + u.Path = strings.Join(dotFree, "/") + if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + // Special case if the last segment was a dot, make sure the path ends with a slash + if lastIsDot && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + } +} + +func removeDirectoryIndex(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") + } +} + +func removeFragment(u *url.URL) { + u.Fragment = "" +} + +func forceHTTP(u *url.URL) { + if strings.ToLower(u.Scheme) == "https" { + u.Scheme = "http" + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} + +func removeWWW(u *url.URL) { + if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { + u.Host = u.Host[4:] + } +} + +func addWWW(u *url.URL) { + if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { + u.Host = "www." + u.Host + } +} + +func sortQuery(u *url.URL) { + q := u.Query() + + if len(q) > 0 { + arKeys := make([]string, len(q)) + i := 0 + for k, _ := range q { + arKeys[i] = k + i++ + } + sort.Strings(arKeys) + buf := new(bytes.Buffer) + for _, k := range arKeys { + sort.Strings(q[k]) + for _, v := range q[k] { + if buf.Len() > 0 { + buf.WriteRune('&') + } + buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) + } + } + + // Rebuild the raw query string + u.RawQuery = buf.String() + } +} + +func decodeDWORDHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { + var parts [4]int64 + + dword, _ := strconv.ParseInt(matches[1], 10, 0) + for i, shift := range []uint{24, 16, 8, 0} { + parts[i] = dword >> shift & 0xFF + } + u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) + } + } +} + +func decodeOctalHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { + var parts [4]int64 + + for i := 1; i <= 4; i++ { + parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) + } + u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) + } + } +} + +func decodeHexHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { + // Conversion is safe because of regex validation + parsed, _ := strconv.ParseInt(matches[1], 16, 0) + // Set host as DWORD (base 10) encoded host + u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) + // The rest is the same as decoding a DWORD host + decodeDWORDHost(u) + } + } +} + +func removeUnncessaryHostDots(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { + // Trim the leading and trailing dots + u.Host = strings.Trim(matches[1], ".") + if len(matches) > 2 { + u.Host += matches[2] + } + } + } +} + +func removeEmptyPortSeparator(u *url.URL) { + if len(u.Host) > 0 { + u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") + } +} diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md new file mode 100644 index 000000000..bebe305e0 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/README.md @@ -0,0 +1,16 @@ +urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) +====== + +Package urlesc implements query escaping as per RFC 3986. + +It contains some parts of the net/url package, modified so as to allow +some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). + +## Install + + go get github.com/PuerkitoBio/urlesc + +## License + +Go license (BSD-3-Clause) + diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go new file mode 100644 index 000000000..1b8462459 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go @@ -0,0 +1,180 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package urlesc implements query escaping as per RFC 3986. +// It contains some parts of the net/url package, modified so as to allow +// some reserved characters incorrectly escaped by net/url. +// See https://github.com/golang/go/issues/5684 +package urlesc + +import ( + "bytes" + "net/url" + "strings" +) + +type encoding int + +const ( + encodePath encoding = 1 + iota + encodeUserPassword + encodeQueryComponent + encodeFragment +) + +// Return true if the specified character should be escaped when +// appearing in a URL string, according to RFC 3986. 
+func shouldEscape(c byte, mode encoding) bool { + // §2.3 Unreserved characters (alphanum) + if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { + return false + } + + switch c { + case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) + return false + + // §2.2 Reserved characters (reserved) + case ':', '/', '?', '#', '[', ']', '@', // gen-delims + '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims + // Different sections of the URL allow a few of + // the reserved characters to appear unescaped. + switch mode { + case encodePath: // §3.3 + // The RFC allows sub-delims and : @. + // '/', '[' and ']' can be used to assign meaning to individual path + // segments. This package only manipulates the path as a whole, + // so we allow those as well. That leaves only ? and # to escape. + return c == '?' || c == '#' + + case encodeUserPassword: // §3.2.1 + // The RFC allows : and sub-delims in + // userinfo. The parsing of userinfo treats ':' as special so we must escape + // all the gen-delims. + return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' + + case encodeQueryComponent: // §3.4 + // The RFC allows / and ?. + return c != '/' && c != '?' + + case encodeFragment: // §4.1 + // The RFC text is silent but the grammar allows + // everything, so escape nothing but # + return c == '#' + } + } + + // Everything else must be escaped. + return true +} + +// QueryEscape escapes the string so it can be safely placed +// inside a URL query. +func QueryEscape(s string) string { + return escape(s, encodeQueryComponent) +} + +func escape(s string, mode encoding) string { + spaceCount, hexCount := 0, 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c, mode) { + if c == ' ' && mode == encodeQueryComponent { + spaceCount++ + } else { + hexCount++ + } + } + } + + if spaceCount == 0 && hexCount == 0 { + return s + } + + t := make([]byte, len(s)+2*hexCount) + j := 0 + for i := 0; i < len(s); i++ { + switch c := s[i]; { + case c == ' ' && mode == encodeQueryComponent: + t[j] = '+' + j++ + case shouldEscape(c, mode): + t[j] = '%' + t[j+1] = "0123456789ABCDEF"[c>>4] + t[j+2] = "0123456789ABCDEF"[c&15] + j += 3 + default: + t[j] = s[i] + j++ + } + } + return string(t) +} + +var uiReplacer = strings.NewReplacer( + "%21", "!", + "%27", "'", + "%28", "(", + "%29", ")", + "%2A", "*", +) + +// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. +func unescapeUserinfo(s string) string { + return uiReplacer.Replace(s) +} + +// Escape reassembles the URL into a valid URL string. +// The general form of the result is one of: +// +// scheme:opaque +// scheme://userinfo@host/path?query#fragment +// +// If u.Opaque is non-empty, String uses the first form; +// otherwise it uses the second form. +// +// In the second form, the following rules apply: +// - if u.Scheme is empty, scheme: is omitted. +// - if u.User is nil, userinfo@ is omitted. +// - if u.Host is empty, host/ is omitted. +// - if u.Scheme and u.Host are empty and u.User is nil, +// the entire scheme://userinfo@host/ is omitted. +// - if u.Host is non-empty and u.Path begins with a /, +// the form host/path does not add its own /. +// - if u.RawQuery is empty, ?query is omitted. +// - if u.Fragment is empty, #fragment is omitted. 
+func Escape(u *url.URL) string { + var buf bytes.Buffer + if u.Scheme != "" { + buf.WriteString(u.Scheme) + buf.WriteByte(':') + } + if u.Opaque != "" { + buf.WriteString(u.Opaque) + } else { + if u.Scheme != "" || u.Host != "" || u.User != nil { + buf.WriteString("//") + if ui := u.User; ui != nil { + buf.WriteString(unescapeUserinfo(ui.String())) + buf.WriteByte('@') + } + if h := u.Host; h != "" { + buf.WriteString(h) + } + } + if u.Path != "" && u.Path[0] != '/' && u.Host != "" { + buf.WriteByte('/') + } + buf.WriteString(escape(u.Path, encodePath)) + } + if u.RawQuery != "" { + buf.WriteByte('?') + buf.WriteString(u.RawQuery) + } + if u.Fragment != "" { + buf.WriteByte('#') + buf.WriteString(escape(u.Fragment, encodeFragment)) + } + return buf.String() +} diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/docker/spdystream/LICENSE new file mode 100644 index 000000000..9e4bd4dbe --- /dev/null +++ b/vendor/github.com/docker/spdystream/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2015 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/docker/spdystream/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. 
Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. 
diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md new file mode 100644 index 000000000..11cccd0a0 --- /dev/null +++ b/vendor/github.com/docker/spdystream/README.md @@ -0,0 +1,77 @@ +# SpdyStream + +A multiplexed stream library using spdy + +## Usage + +Client example (connecting to mirroring server without auth) + +```go +package main + +import ( + "fmt" + "github.com/docker/spdystream" + "net" + "net/http" +) + +func main() { + conn, err := net.Dial("tcp", "localhost:8080") + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.NoOpStreamHandler) + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + panic(err) + } + + stream.Wait() + + fmt.Fprint(stream, "Writing to stream") + + buf := make([]byte, 25) + stream.Read(buf) + fmt.Println(string(buf)) + + stream.Close() +} +``` + +Server example (mirroring server without auth) + +```go +package main + +import ( + "github.com/docker/spdystream" + "net" +) + +func main() { + listener, err := net.Listen("tcp", "localhost:8080") + if err != nil { + panic(err) + } + for { + conn, err := listener.Accept() + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.MirrorStreamHandler) + } +} +``` + +## Copyright and license + +Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. 
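The README examples above cover stream creation and mirroring, but the vendored `connection.go` that follows also exposes connection-lifecycle helpers (`Ping`, `SetIdleTimeout`, `CloseWait`). A minimal client-side sketch of those calls, assuming the `Connection` API exactly as vendored here (error handling kept to `panic` in the README's style; the address `localhost:8080` matches the server example above):

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/docker/spdystream"
)

func main() {
	// Dial the mirroring server from the README example above.
	conn, err := net.Dial("tcp", "localhost:8080")
	if err != nil {
		panic(err)
	}
	spdyConn, err := spdystream.NewConnection(conn, false)
	if err != nil {
		panic(err)
	}
	go spdyConn.Serve(spdystream.NoOpStreamHandler)

	// Tear the connection down if it sits idle for more than a minute.
	spdyConn.SetIdleTimeout(time.Minute)

	// Measure round-trip time with a SPDY PING frame.
	rtt, err := spdyConn.Ping()
	if err != nil {
		panic(err)
	}
	fmt.Println("ping round-trip:", rtt)

	// Close sends a GOAWAY frame; CloseWait additionally blocks until
	// shutdown of the underlying network connection has finished.
	if err := spdyConn.CloseWait(); err != nil {
		panic(err)
	}
}
```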
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go new file mode 100644 index 000000000..6031a0db1 --- /dev/null +++ b/vendor/github.com/docker/spdystream/connection.go @@ -0,0 +1,958 @@ +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/docker/spdystream/spdy" +) + +var ( + ErrInvalidStreamId = errors.New("Invalid stream id") + ErrTimeout = errors.New("Timeout occured") + ErrReset = errors.New("Stream reset") + ErrWriteClosedStream = errors.New("Write on closed stream") +) + +const ( + FRAME_WORKERS = 5 + QUEUE_SIZE = 50 +) + +type StreamHandler func(stream *Stream) + +type AuthHandler func(header http.Header, slot uint8, parent uint32) bool + +type idleAwareFramer struct { + f *spdy.Framer + conn *Connection + writeLock sync.Mutex + resetChan chan struct{} + setTimeoutLock sync.Mutex + setTimeoutChan chan time.Duration + timeout time.Duration +} + +func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer { + iaf := &idleAwareFramer{ + f: framer, + resetChan: make(chan struct{}, 2), + // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about + // the same time the connection is being closed + setTimeoutChan: make(chan time.Duration, 1), + } + return iaf +} + +func (i *idleAwareFramer) monitor() { + var ( + timer *time.Timer + expired <-chan time.Time + resetChan = i.resetChan + setTimeoutChan = i.setTimeoutChan + ) +Loop: + for { + select { + case timeout := <-i.setTimeoutChan: + i.timeout = timeout + if timeout == 0 { + if timer != nil { + timer.Stop() + } + } else { + if timer == nil { + timer = time.NewTimer(timeout) + expired = timer.C + } else { + timer.Reset(timeout) + } + } + case <-resetChan: + if timer != nil && i.timeout > 0 { + timer.Reset(i.timeout) + } + case <-expired: + i.conn.streamCond.L.Lock() + streams := i.conn.streams + i.conn.streams = make(map[spdy.StreamId]*Stream) + i.conn.streamCond.Broadcast() + i.conn.streamCond.L.Unlock() + go func() { + for _, stream := range streams { + stream.resetStream() + } + i.conn.Close() + }() + case <-i.conn.closeChan: + if timer != nil { + timer.Stop() + } + + // Start a goroutine to drain resetChan. This is needed because we've seen + // some unit tests with large numbers of goroutines get into a situation + // where resetChan fills up, at least 1 call to Write() is still trying to + // send to resetChan, the connection gets closed, and this case statement + // attempts to grab the write lock that Write() already has, causing a + // deadlock. + // + // See https://github.com/docker/spdystream/issues/49 for more details. 
+ go func() { + for _ = range resetChan { + } + }() + + go func() { + for _ = range setTimeoutChan { + } + }() + + i.writeLock.Lock() + close(resetChan) + i.resetChan = nil + i.writeLock.Unlock() + + i.setTimeoutLock.Lock() + close(i.setTimeoutChan) + i.setTimeoutChan = nil + i.setTimeoutLock.Unlock() + + break Loop + } + } + + // Drain resetChan + for _ = range resetChan { + } +} + +func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error { + i.writeLock.Lock() + defer i.writeLock.Unlock() + if i.resetChan == nil { + return io.EOF + } + err := i.f.WriteFrame(frame) + if err != nil { + return err + } + + i.resetChan <- struct{}{} + + return nil +} + +func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) { + frame, err := i.f.ReadFrame() + if err != nil { + return nil, err + } + + // resetChan should never be closed since it is only closed + // when the connection has closed its closeChan. This closure + // only occurs after all Reads have finished + // TODO (dmcgowan): refactor relationship into connection + i.resetChan <- struct{}{} + + return frame, nil +} + +func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) { + i.setTimeoutLock.Lock() + defer i.setTimeoutLock.Unlock() + + if i.setTimeoutChan == nil { + return + } + + i.setTimeoutChan <- timeout +} + +type Connection struct { + conn net.Conn + framer *idleAwareFramer + + closeChan chan bool + goneAway bool + lastStreamChan chan<- *Stream + goAwayTimeout time.Duration + closeTimeout time.Duration + + streamLock *sync.RWMutex + streamCond *sync.Cond + streams map[spdy.StreamId]*Stream + + nextIdLock sync.Mutex + receiveIdLock sync.Mutex + nextStreamId spdy.StreamId + receivedStreamId spdy.StreamId + + pingIdLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error + + shutdownLock sync.Mutex + shutdownChan chan error + hasShutdown bool + + // for testing https://github.com/docker/spdystream/pull/56 + dataFrameHandler func(*spdy.DataFrame) error +} + +// NewConnection creates a new spdy connection from an existing +// network connection. 
+func NewConnection(conn net.Conn, server bool) (*Connection, error) { + framer, framerErr := spdy.NewFramer(conn, conn) + if framerErr != nil { + return nil, framerErr + } + idleAwareFramer := newIdleAwareFramer(framer) + var sid spdy.StreamId + var rid spdy.StreamId + var pid uint32 + if server { + sid = 2 + rid = 1 + pid = 2 + } else { + sid = 1 + rid = 2 + pid = 1 + } + + streamLock := new(sync.RWMutex) + streamCond := sync.NewCond(streamLock) + + session := &Connection{ + conn: conn, + framer: idleAwareFramer, + + closeChan: make(chan bool), + goAwayTimeout: time.Duration(0), + closeTimeout: time.Duration(0), + + streamLock: streamLock, + streamCond: streamCond, + streams: make(map[spdy.StreamId]*Stream), + nextStreamId: sid, + receivedStreamId: rid, + + pingId: pid, + pingChans: make(map[uint32]chan error), + + shutdownChan: make(chan error), + } + session.dataFrameHandler = session.handleDataFrame + idleAwareFramer.conn = session + go idleAwareFramer.monitor() + + return session, nil +} + +// Ping sends a ping frame across the connection and +// returns the response time +func (s *Connection) Ping() (time.Duration, error) { + pid := s.pingId + s.pingIdLock.Lock() + if s.pingId > 0x7ffffffe { + s.pingId = s.pingId - 0x7ffffffe + } else { + s.pingId = s.pingId + 2 + } + s.pingIdLock.Unlock() + pingChan := make(chan error) + s.pingChans[pid] = pingChan + defer delete(s.pingChans, pid) + + frame := &spdy.PingFrame{Id: pid} + startTime := time.Now() + writeErr := s.framer.WriteFrame(frame) + if writeErr != nil { + return time.Duration(0), writeErr + } + select { + case <-s.closeChan: + return time.Duration(0), errors.New("connection closed") + case err, ok := <-pingChan: + if ok && err != nil { + return time.Duration(0), err + } + break + } + return time.Now().Sub(startTime), nil +} + +// Serve handles frames sent from the server, including reply frames +// which are needed to fully initiate connections. Both clients and servers +// should call Serve in a separate goroutine before creating streams. +func (s *Connection) Serve(newHandler StreamHandler) { + // use a WaitGroup to wait for all frames to be drained after receiving + // go-away. 
+ var wg sync.WaitGroup + + // Parition queues to ensure stream frames are handled + // by the same worker, ensuring order is maintained + frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS) + for i := 0; i < FRAME_WORKERS; i++ { + frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE) + + // Ensure frame queue is drained when connection is closed + go func(frameQueue *PriorityFrameQueue) { + <-s.closeChan + frameQueue.Drain() + }(frameQueues[i]) + + wg.Add(1) + go func(frameQueue *PriorityFrameQueue) { + // let the WaitGroup know this worker is done + defer wg.Done() + + s.frameHandler(frameQueue, newHandler) + }(frameQueues[i]) + } + + var ( + partitionRoundRobin int + goAwayFrame *spdy.GoAwayFrame + ) +Loop: + for { + readFrame, err := s.framer.ReadFrame() + if err != nil { + if err != io.EOF { + fmt.Errorf("frame read error: %s", err) + } else { + debugMessage("(%p) EOF received", s) + } + break + } + var priority uint8 + var partition int + switch frame := readFrame.(type) { + case *spdy.SynStreamFrame: + if s.checkStreamFrame(frame) { + priority = frame.Priority + partition = int(frame.StreamId % FRAME_WORKERS) + debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId) + s.addStreamFrame(frame) + } else { + debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId) + continue + } + case *spdy.SynReplyFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.DataFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.RstStreamFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.HeadersFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.PingFrame: + priority = 0 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + case *spdy.GoAwayFrame: + // hold on to the go away frame and exit the loop + goAwayFrame = frame + break Loop + default: + priority = 7 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + } + frameQueues[partition].Push(readFrame, priority) + } + close(s.closeChan) + + // wait for all frame handler workers to indicate they've drained their queues + // before handling the go away frame + wg.Wait() + + if goAwayFrame != nil { + s.handleGoAwayFrame(goAwayFrame) + } + + // now it's safe to close remote channels and empty s.streams + s.streamCond.L.Lock() + // notify streams that they're now closed, which will + // unblock any stream Read() calls + for _, stream := range s.streams { + stream.closeRemoteChannels() + } + s.streams = make(map[spdy.StreamId]*Stream) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) { + for { + popFrame := frameQueue.Pop() + if popFrame == nil { + return + } + + var frameErr error + switch frame := popFrame.(type) { + case *spdy.SynStreamFrame: + frameErr = s.handleStreamFrame(frame, newHandler) + case *spdy.SynReplyFrame: + frameErr = s.handleReplyFrame(frame) + case *spdy.DataFrame: + frameErr = s.dataFrameHandler(frame) + case *spdy.RstStreamFrame: + frameErr = s.handleResetFrame(frame) + case *spdy.HeadersFrame: + frameErr = s.handleHeaderFrame(frame) + case *spdy.PingFrame: + frameErr = s.handlePingFrame(frame) + case *spdy.GoAwayFrame: + frameErr = s.handleGoAwayFrame(frame) + 
default: + frameErr = fmt.Errorf("unhandled frame type: %T", frame) + } + + if frameErr != nil { + fmt.Errorf("frame handling error: %s", frameErr) + } + } +} + +func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 { + stream, streamOk := s.getStream(streamId) + if !streamOk { + return 7 + } + return stream.priority +} + +func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) { + var parent *Stream + if frame.AssociatedToStreamId != spdy.StreamId(0) { + parent, _ = s.getStream(frame.AssociatedToStreamId) + } + + stream := &Stream{ + streamId: frame.StreamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: frame.Headers, + finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00, + replyCond: sync.NewCond(new(sync.Mutex)), + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 { + stream.closeRemoteChannels() + } + + s.addStream(stream) +} + +// checkStreamFrame checks to see if a stream frame is allowed. +// If the stream is invalid, then a reset frame with protocol error +// will be returned. +func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool { + s.receiveIdLock.Lock() + defer s.receiveIdLock.Unlock() + if s.goneAway { + return false + } + validationErr := s.validateStreamId(frame.StreamId) + if validationErr != nil { + go func() { + resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId) + if resetErr != nil { + fmt.Errorf("reset error: %s", resetErr) + } + }() + return false + } + return true +} + +func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error { + stream, ok := s.getStream(frame.StreamId) + if !ok { + return fmt.Errorf("Missing stream: %d", frame.StreamId) + } + + newHandler(stream) + + return nil +} + +func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error { + debugMessage("(%p) Reply frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("Reply frame gone away for %d", frame.StreamId) + // Stream has already gone away + return nil + } + if stream.replied { + // Stream has already received reply + return nil + } + stream.replied = true + + // TODO Check for error + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + close(stream.startChan) + + return nil +} + +func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already been removed + return nil + } + s.removeStream(stream) + stream.closeRemoteChannels() + + if !stream.replied { + stream.replied = true + stream.startChan <- ErrReset + close(stream.startChan) + } + + stream.finishLock.Lock() + stream.finished = true + stream.finishLock.Unlock() + + return nil +} + +func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already gone away + return nil + } + if !stream.replied { + // No reply received...Protocol error? + return nil + } + + // TODO limit headers while not blocking (use buffered chan or goroutine?) 
+ select { + case <-stream.closeChan: + return nil + case stream.headerChan <- frame.Headers: + } + + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + return nil +} + +func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { + debugMessage("(%p) Data frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId) + // Stream has already gone away + return nil + } + if !stream.replied { + debugMessage("(%p) Data frame not replied %d", s, frame.StreamId) + // No reply received...Protocol error? + return nil + } + + debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId) + if len(frame.Data) > 0 { + stream.dataLock.RLock() + select { + case <-stream.closeChan: + debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId) + case stream.dataChan <- frame.Data: + debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId) + } + stream.dataLock.RUnlock() + } + if (frame.Flags & spdy.DataFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + return nil +} + +func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { + if s.pingId&0x01 != frame.Id&0x01 { + return s.framer.WriteFrame(frame) + } + pingChan, pingOk := s.pingChans[frame.Id] + if pingOk { + close(pingChan) + } + return nil +} + +func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error { + debugMessage("(%p) Go away received", s) + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + if s.lastStreamChan != nil { + stream, _ := s.getStream(frame.LastGoodStreamId) + go func() { + s.lastStreamChan <- stream + }() + } + + // Do not block frame handler waiting for closure + go s.shutdown(s.goAwayTimeout) + + return nil +} + +func (s *Connection) remoteStreamFinish(stream *Stream) { + stream.closeRemoteChannels() + + stream.finishLock.Lock() + if stream.finished { + // Stream is fully closed, cleanup + s.removeStream(stream) + } + stream.finishLock.Unlock() +} + +// CreateStream creates a new spdy stream using the parameters for +// creating the stream frame. The stream frame will be sent upon +// calling this function, however this function does not wait for +// the reply frame. If waiting for the reply is desired, use +// the stream Wait or WaitTimeout function on the stream returned +// by this function. +func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) { + // MUST synchronize stream creation (all the way to writing the frame) + // as stream IDs **MUST** increase monotonically. 
+ s.nextIdLock.Lock() + defer s.nextIdLock.Unlock() + + streamId := s.getNextStreamId() + if streamId == 0 { + return nil, fmt.Errorf("Unable to get new stream id") + } + + stream := &Stream{ + streamId: streamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: headers, + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + + debugMessage("(%p) (%p) Create stream", s, stream) + + s.addStream(stream) + + return stream, s.sendStream(stream, fin) +} + +func (s *Connection) shutdown(closeTimeout time.Duration) { + // TODO Ensure this isn't called multiple times + s.shutdownLock.Lock() + if s.hasShutdown { + s.shutdownLock.Unlock() + return + } + s.hasShutdown = true + s.shutdownLock.Unlock() + + var timeout <-chan time.Time + if closeTimeout > time.Duration(0) { + timeout = time.After(closeTimeout) + } + streamsClosed := make(chan bool) + + go func() { + s.streamCond.L.Lock() + for len(s.streams) > 0 { + debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams) + s.streamCond.Wait() + } + s.streamCond.L.Unlock() + close(streamsClosed) + }() + + var err error + select { + case <-streamsClosed: + // No active streams, close should be safe + err = s.conn.Close() + case <-timeout: + // Force ungraceful close + err = s.conn.Close() + // Wait for cleanup to clear active streams + <-streamsClosed + } + + if err != nil { + duration := 10 * time.Minute + time.AfterFunc(duration, func() { + select { + case err, ok := <-s.shutdownChan: + if ok { + fmt.Errorf("Unhandled close error after %s: %s", duration, err) + } + default: + } + }) + s.shutdownChan <- err + } + close(s.shutdownChan) + + return +} + +// Closes spdy connection by sending GoAway frame and initiating shutdown +func (s *Connection) Close() error { + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + var lastStreamId spdy.StreamId + if s.receivedStreamId > 2 { + lastStreamId = s.receivedStreamId - 2 + } + + goAwayFrame := &spdy.GoAwayFrame{ + LastGoodStreamId: lastStreamId, + Status: spdy.GoAwayOK, + } + + err := s.framer.WriteFrame(goAwayFrame) + if err != nil { + return err + } + + go s.shutdown(s.closeTimeout) + + return nil +} + +// CloseWait closes the connection and waits for shutdown +// to finish. Note the underlying network Connection +// is not closed until the end of shutdown. +func (s *Connection) CloseWait() error { + closeErr := s.Close() + if closeErr != nil { + return closeErr + } + shutdownErr, ok := <-s.shutdownChan + if ok { + return shutdownErr + } + return nil +} + +// Wait waits for the connection to finish shutdown or for +// the wait timeout duration to expire. This needs to be +// called either after Close has been called or the GOAWAYFRAME +// has been received. If the wait timeout is 0, this function +// will block until shutdown finishes. If wait is never called +// and a shutdown error occurs, that error will be logged as an +// unhandled error. +func (s *Connection) Wait(waitTimeout time.Duration) error { + var timeout <-chan time.Time + if waitTimeout > time.Duration(0) { + timeout = time.After(waitTimeout) + } + + select { + case err, ok := <-s.shutdownChan: + if ok { + return err + } + case <-timeout: + return ErrTimeout + } + return nil +} + +// NotifyClose registers a channel to be called when the remote +// peer inidicates connection closure. The last stream to be +// received by the remote will be sent on the channel. 
The notify +// timeout will determine the duration between go away received +// and the connection being closed. +func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) { + s.goAwayTimeout = timeout + s.lastStreamChan = c +} + +// SetCloseTimeout sets the amount of time close will wait for +// streams to finish before terminating the underlying network +// connection. Setting the timeout to 0 will cause close to +// wait forever, which is the default. +func (s *Connection) SetCloseTimeout(timeout time.Duration) { + s.closeTimeout = timeout +} + +// SetIdleTimeout sets the amount of time the connection may sit idle before +// it is forcefully terminated. +func (s *Connection) SetIdleTimeout(timeout time.Duration) { + s.framer.setIdleTimeout(timeout) +} + +func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + headerFrame := &spdy.HeadersFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(headerFrame) +} + +func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + replyFrame := &spdy.SynReplyFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(replyFrame) +} + +func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error { + resetFrame := &spdy.RstStreamFrame{ + StreamId: streamId, + Status: status, + } + + return s.framer.WriteFrame(resetFrame) +} + +func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error { + return s.sendResetFrame(status, stream.streamId) +} + +func (s *Connection) sendStream(stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + stream.finished = true + } + + var parentId spdy.StreamId + if stream.parent != nil { + parentId = stream.parent.streamId + } + + streamFrame := &spdy.SynStreamFrame{ + StreamId: spdy.StreamId(stream.streamId), + AssociatedToStreamId: spdy.StreamId(parentId), + Headers: stream.headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(streamFrame) +} + +// getNextStreamId returns the next sequential id +// every call should produce a unique value or an error +func (s *Connection) getNextStreamId() spdy.StreamId { + sid := s.nextStreamId + if sid > 0x7fffffff { + return 0 + } + s.nextStreamId = s.nextStreamId + 2 + return sid +} + +// PeekNextStreamId returns the next sequential id and keeps the next id untouched +func (s *Connection) PeekNextStreamId() spdy.StreamId { + sid := s.nextStreamId + return sid +} + +func (s *Connection) validateStreamId(rid spdy.StreamId) error { + if rid > 0x7fffffff || rid < s.receivedStreamId { + return ErrInvalidStreamId + } + s.receivedStreamId = rid + 2 + return nil +} + +func (s *Connection) addStream(stream *Stream) { + s.streamCond.L.Lock() + s.streams[stream.streamId] = stream + debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) removeStream(stream *Stream) { + s.streamCond.L.Lock() + delete(s.streams, stream.streamId) + debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + 
s.streamCond.L.Unlock() +} + +func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) { + s.streamLock.RLock() + stream, ok = s.streams[streamId] + s.streamLock.RUnlock() + return +} + +// FindStream looks up the given stream id and either waits for the +// stream to be found or returns nil if the stream id is no longer +// valid. +func (s *Connection) FindStream(streamId uint32) *Stream { + var stream *Stream + var ok bool + s.streamCond.L.Lock() + stream, ok = s.streams[spdy.StreamId(streamId)] + debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok) + for !ok && streamId >= uint32(s.receivedStreamId) { + s.streamCond.Wait() + stream, ok = s.streams[spdy.StreamId(streamId)] + } + s.streamCond.L.Unlock() + return stream +} + +func (s *Connection) CloseChan() <-chan bool { + return s.closeChan +} diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go new file mode 100644 index 000000000..b59fa5fdc --- /dev/null +++ b/vendor/github.com/docker/spdystream/handlers.go @@ -0,0 +1,38 @@ +package spdystream + +import ( + "io" + "net/http" +) + +// MirrorStreamHandler mirrors all streams. +func MirrorStreamHandler(stream *Stream) { + replyErr := stream.SendReply(http.Header{}, false) + if replyErr != nil { + return + } + + go func() { + io.Copy(stream, stream) + stream.Close() + }() + go func() { + for { + header, receiveErr := stream.ReceiveHeader() + if receiveErr != nil { + return + } + sendErr := stream.SendHeader(header, false) + if sendErr != nil { + return + } + } + }() +} + +// NoopStreamHandler does nothing when stream connects, most +// likely used with RejectAuthHandler which will not allow any +// streams to make it to the stream handler. +func NoOpStreamHandler(stream *Stream) { + stream.SendReply(http.Header{}, false) +} diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go new file mode 100644 index 000000000..fc8582b5c --- /dev/null +++ b/vendor/github.com/docker/spdystream/priority.go @@ -0,0 +1,98 @@ +package spdystream + +import ( + "container/heap" + "sync" + + "github.com/docker/spdystream/spdy" +) + +type prioritizedFrame struct { + frame spdy.Frame + priority uint8 + insertId uint64 +} + +type frameQueue []*prioritizedFrame + +func (fq frameQueue) Len() int { + return len(fq) +} + +func (fq frameQueue) Less(i, j int) bool { + if fq[i].priority == fq[j].priority { + return fq[i].insertId < fq[j].insertId + } + return fq[i].priority < fq[j].priority +} + +func (fq frameQueue) Swap(i, j int) { + fq[i], fq[j] = fq[j], fq[i] +} + +func (fq *frameQueue) Push(x interface{}) { + *fq = append(*fq, x.(*prioritizedFrame)) +} + +func (fq *frameQueue) Pop() interface{} { + old := *fq + n := len(old) + *fq = old[0 : n-1] + return old[n-1] +} + +type PriorityFrameQueue struct { + queue *frameQueue + c *sync.Cond + size int + nextInsertId uint64 + drain bool +} + +func NewPriorityFrameQueue(size int) *PriorityFrameQueue { + queue := make(frameQueue, 0, size) + heap.Init(&queue) + + return &PriorityFrameQueue{ + queue: &queue, + size: size, + c: sync.NewCond(&sync.Mutex{}), + } +} + +func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() >= q.size { + q.c.Wait() + } + pFrame := &prioritizedFrame{ + frame: frame, + priority: priority, + insertId: q.nextInsertId, + } + q.nextInsertId = q.nextInsertId + 1 + heap.Push(q.queue, pFrame) + q.c.Signal() +} + +func (q 
*PriorityFrameQueue) Pop() spdy.Frame { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() == 0 { + if q.drain { + return nil + } + q.c.Wait() + } + frame := heap.Pop(q.queue).(*prioritizedFrame).frame + q.c.Signal() + return frame +} + +func (q *PriorityFrameQueue) Drain() { + q.c.L.Lock() + defer q.c.L.Unlock() + q.drain = true + q.c.Broadcast() +} diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go new file mode 100644 index 000000000..5a5ff0e14 --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/dictionary.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +// headerDictionary is the dictionary sent to the zlib compressor/decompressor. +var headerDictionary = []byte{ + 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, + 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, + 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, + 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, + 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, + 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, + 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, + 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, + 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, + 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 
0x64, 0x2d, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, + 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, + 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, + 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, + 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, + 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, + 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, + 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, + 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, + 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, + 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, + 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, + 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, + 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, + 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, + 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, + 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, + 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, + 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, + 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, + 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, + 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, + 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, + 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, + 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, + 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, + 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, + 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, + 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, + 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, + 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, + 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, + 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, + 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, + 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, + 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x33, 0x30, 0x31, 
0x20, 0x4d, 0x6f, + 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, + 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, + 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, + 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, + 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, + 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, + 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, + 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, + 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46, + 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, + 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, + 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, + 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, + 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, + 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, + 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, + 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, + 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, + 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, + 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, + 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, + 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, + 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, + 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, + 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, + 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, + 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, + 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, + 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, + 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, + 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, + 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, +} diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go new file mode 100644 index 000000000..9359a9501 --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/read.go @@ -0,0 +1,348 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package spdy + +import ( + "compress/zlib" + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynStreamFrame(h, frame) +} + +func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynReplyFrame(h, frame) +} + +func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + var numSettings uint32 + if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { + return err + } + frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) + for i := uint32(0); i < numSettings; i++ { + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { + return err + } + frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24) + frame.FlagIdValues[i].Id &= 0xffffff + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil { + return err + } + } + return nil +} + +func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil { + return err + } + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, StreamId(frame.Id)} + } + return nil +} + +func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + return nil +} + +func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readHeadersFrame(h, frame) +} + +func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil { + return err + } + return nil +} + +func newControlFrame(frameType ControlFrameType) (controlFrame, error) { + ctor, ok := cframeCtor[frameType] + if !ok { + return nil, &Error{Err: InvalidControlFrame} + } + return ctor(), nil +} + +var cframeCtor = map[ControlFrameType]func() controlFrame{ + TypeSynStream: func() controlFrame { return new(SynStreamFrame) }, + TypeSynReply: func() controlFrame { return new(SynReplyFrame) }, + TypeRstStream: func() controlFrame { return new(RstStreamFrame) }, + TypeSettings: func() controlFrame { return new(SettingsFrame) }, + TypePing: func() controlFrame { return new(PingFrame) }, + TypeGoAway: 
func() controlFrame { return new(GoAwayFrame) }, + TypeHeaders: func() controlFrame { return new(HeadersFrame) }, + TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) }, +} + +func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error { + if f.headerDecompressor != nil { + f.headerReader.N = payloadSize + return nil + } + f.headerReader = io.LimitedReader{R: f.r, N: payloadSize} + decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary)) + if err != nil { + return err + } + f.headerDecompressor = decompressor + return nil +} + +// ReadFrame reads SPDY encoded data and returns a decompressed Frame. +func (f *Framer) ReadFrame() (Frame, error) { + var firstWord uint32 + if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil { + return nil, err + } + if firstWord&0x80000000 != 0 { + frameType := ControlFrameType(firstWord & 0xffff) + version := uint16(firstWord >> 16 & 0x7fff) + return f.parseControlFrame(version, frameType) + } + return f.parseDataFrame(StreamId(firstWord & 0x7fffffff)) +} + +func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + flags := ControlFlags((length & 0xff000000) >> 24) + length &= 0xffffff + header := ControlFrameHeader{version, frameType, flags, length} + cframe, err := newControlFrame(frameType) + if err != nil { + return nil, err + } + if err = cframe.read(header, f); err != nil { + return nil, err + } + return cframe, nil +} + +func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { + var numHeaders uint32 + if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { + return nil, err + } + var e error + h := make(http.Header, int(numHeaders)) + for i := 0; i < int(numHeaders); i++ { + var length uint32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + nameBytes := make([]byte, length) + if _, err := io.ReadFull(r, nameBytes); err != nil { + return nil, err + } + name := string(nameBytes) + if name != strings.ToLower(name) { + e = &Error{UnlowercasedHeaderName, streamId} + name = strings.ToLower(name) + } + if h[name] != nil { + e = &Error{DuplicateHeaders, streamId} + } + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + value := make([]byte, length) + if _, err := io.ReadFull(r, value); err != nil { + return nil, err + } + valueList := strings.Split(string(value), headerValueSeparator) + for _, v := range valueList { + h.Add(name, v) + } + } + if e != nil { + return h, e + } + return h, nil +} + +func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil { + return err + } + frame.Priority >>= 5 + if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 10)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && 
f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidReqHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidRespHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + var invalidHeaders map[string]bool + if frame.StreamId%2 == 0 { + invalidHeaders = invalidReqHeaders + } else { + invalidHeaders = invalidRespHeaders + } + for h := range frame.Headers { + if invalidHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + var frame DataFrame + frame.StreamId = streamId + frame.Flags = DataFlags(length >> 24) + length &= 0xffffff + frame.Data = make([]byte, length) + if _, err := io.ReadFull(f.r, frame.Data); err != nil { + return nil, err + } + if frame.StreamId == 0 { + return nil, &Error{ZeroStreamId, 0} + } + return &frame, nil +} diff --git a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go new file mode 100644 index 000000000..7b6ee9c6f --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/types.go @@ -0,0 +1,275 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package spdy implements the SPDY protocol (currently SPDY/3), described in +// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3. +package spdy + +import ( + "bytes" + "compress/zlib" + "io" + "net/http" +) + +// Version is the protocol version number that this package implements. +const Version = 3 + +// ControlFrameType stores the type field in a control frame header. 
+type ControlFrameType uint16 + +const ( + TypeSynStream ControlFrameType = 0x0001 + TypeSynReply = 0x0002 + TypeRstStream = 0x0003 + TypeSettings = 0x0004 + TypePing = 0x0006 + TypeGoAway = 0x0007 + TypeHeaders = 0x0008 + TypeWindowUpdate = 0x0009 +) + +// ControlFlags are the flags that can be set on a control frame. +type ControlFlags uint8 + +const ( + ControlFlagFin ControlFlags = 0x01 + ControlFlagUnidirectional = 0x02 + ControlFlagSettingsClearSettings = 0x01 +) + +// DataFlags are the flags that can be set on a data frame. +type DataFlags uint8 + +const ( + DataFlagFin DataFlags = 0x01 +) + +// MaxDataLength is the maximum number of bytes that can be stored in one frame. +const MaxDataLength = 1<<24 - 1 + +// headerValueSepator separates multiple header values. +const headerValueSeparator = "\x00" + +// Frame is a single SPDY frame in its unpacked in-memory representation. Use +// Framer to read and write it. +type Frame interface { + write(f *Framer) error +} + +// ControlFrameHeader contains all the fields in a control frame header, +// in its unpacked in-memory representation. +type ControlFrameHeader struct { + // Note, high bit is the "Control" bit. + version uint16 // spdy version number + frameType ControlFrameType + Flags ControlFlags + length uint32 // length of data field +} + +type controlFrame interface { + Frame + read(h ControlFrameHeader, f *Framer) error +} + +// StreamId represents a 31-bit value identifying the stream. +type StreamId uint32 + +// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM +// frame. +type SynStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to + Priority uint8 // priority of this frame (3-bit) + Slot uint8 // index in the server's credential vector of the client certificate + Headers http.Header +} + +// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame. +type SynReplyFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// RstStreamStatus represents the status that led to a RST_STREAM. +type RstStreamStatus uint32 + +const ( + ProtocolError RstStreamStatus = iota + 1 + InvalidStream + RefusedStream + UnsupportedVersion + Cancel + InternalError + FlowControlError + StreamInUse + StreamAlreadyClosed + InvalidCredentials + FrameTooLarge +) + +// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM +// frame. +type RstStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Status RstStreamStatus +} + +// SettingsFlag represents a flag in a SETTINGS frame. +type SettingsFlag uint8 + +const ( + FlagSettingsPersistValue SettingsFlag = 0x1 + FlagSettingsPersisted = 0x2 +) + +// SettingsFlag represents the id of an id/value pair in a SETTINGS frame. +type SettingsId uint32 + +const ( + SettingsUploadBandwidth SettingsId = iota + 1 + SettingsDownloadBandwidth + SettingsRoundTripTime + SettingsMaxConcurrentStreams + SettingsCurrentCwnd + SettingsDownloadRetransRate + SettingsInitialWindowSize + SettingsClientCretificateVectorSize +) + +// SettingsFlagIdValue is the unpacked, in-memory representation of the +// combined flag/id/value for a setting in a SETTINGS frame. +type SettingsFlagIdValue struct { + Flag SettingsFlag + Id SettingsId + Value uint32 +} + +// SettingsFrame is the unpacked, in-memory representation of a SPDY +// SETTINGS frame. 
+type SettingsFrame struct { + CFHeader ControlFrameHeader + FlagIdValues []SettingsFlagIdValue +} + +// PingFrame is the unpacked, in-memory representation of a PING frame. +type PingFrame struct { + CFHeader ControlFrameHeader + Id uint32 // unique id for this ping, from server is even, from client is odd. +} + +// GoAwayStatus represents the status in a GoAwayFrame. +type GoAwayStatus uint32 + +const ( + GoAwayOK GoAwayStatus = iota + GoAwayProtocolError + GoAwayInternalError +) + +// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame. +type GoAwayFrame struct { + CFHeader ControlFrameHeader + LastGoodStreamId StreamId // last stream id which was accepted by sender + Status GoAwayStatus +} + +// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame. +type HeadersFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// WindowUpdateFrame is the unpacked, in-memory representation of a +// WINDOW_UPDATE frame. +type WindowUpdateFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + DeltaWindowSize uint32 // additional number of bytes to existing window size +} + +// TODO: Implement credential frame and related methods. + +// DataFrame is the unpacked, in-memory representation of a DATA frame. +type DataFrame struct { + // Note, high bit is the "Control" bit. Should be 0 for data frames. + StreamId StreamId + Flags DataFlags + Data []byte // payload data of this frame +} + +// A SPDY specific error. +type ErrorCode string + +const ( + UnlowercasedHeaderName ErrorCode = "header was not lowercased" + DuplicateHeaders = "multiple headers with same name" + WrongCompressedPayloadSize = "compressed payload size was incorrect" + UnknownFrameType = "unknown frame type" + InvalidControlFrame = "invalid control frame" + InvalidDataFrame = "invalid data frame" + InvalidHeaderPresent = "frame contained invalid header" + ZeroStreamId = "stream id zero is disallowed" +) + +// Error contains both the type of error and additional values. StreamId is 0 +// if Error is not associated with a stream. +type Error struct { + Err ErrorCode + StreamId StreamId +} + +func (e *Error) Error() string { + return string(e.Err) +} + +var invalidReqHeaders = map[string]bool{ + "Connection": true, + "Host": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +var invalidRespHeaders = map[string]bool{ + "Connection": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +// Framer handles serializing/deserializing SPDY frames, including compressing/ +// decompressing payloads. +type Framer struct { + headerCompressionDisabled bool + w io.Writer + headerBuf *bytes.Buffer + headerCompressor *zlib.Writer + r io.Reader + headerReader io.LimitedReader + headerDecompressor io.ReadCloser +} + +// NewFramer allocates a new Framer for a given SPDY connection, represented by +// a io.Writer and io.Reader. Note that Framer will read and write individual fields +// from/to the Reader and Writer, so the caller should pass in an appropriately +// buffered implementation to optimize performance. 
+func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { + compressBuf := new(bytes.Buffer) + compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary)) + if err != nil { + return nil, err + } + framer := &Framer{ + w: w, + headerBuf: compressBuf, + headerCompressor: compressor, + r: r, + } + return framer, nil +} diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go new file mode 100644 index 000000000..b212f66a2 --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/write.go @@ -0,0 +1,318 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +import ( + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) write(f *Framer) error { + return f.writeSynStreamFrame(frame) +} + +func (frame *SynReplyFrame) write(f *Framer) error { + return f.writeSynReplyFrame(frame) +} + +func (frame *RstStreamFrame) write(f *Framer) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeRstStream + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return +} + +func (frame *SettingsFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSettings + frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { + return + } + for _, flagIdValue := range frame.FlagIdValues { + flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id) + if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { + return + } + } + return +} + +func (frame *PingFrame) write(f *Framer) (err error) { + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypePing + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 4 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { + return + } + return +} + +func (frame *GoAwayFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeGoAway + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. 
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return nil +} + +func (frame *HeadersFrame) write(f *Framer) error { + return f.writeHeadersFrame(frame) +} + +func (frame *WindowUpdateFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeWindowUpdate + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil { + return + } + return nil +} + +func (frame *DataFrame) write(f *Framer) error { + return f.writeDataFrame(frame) +} + +// WriteFrame writes a frame. +func (f *Framer) WriteFrame(frame Frame) error { + return frame.write(f) +} + +func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { + if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { + return err + } + if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { + return err + } + flagsAndLength := uint32(h.Flags)<<24 | h.length + if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { + return err + } + return nil +} + +func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { + n = 0 + if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { + return + } + n += 2 + for name, values := range h { + if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { + return + } + n += 2 + name = strings.ToLower(name) + if _, err = io.WriteString(w, name); err != nil { + return + } + n += len(name) + v := strings.Join(values, headerValueSeparator) + if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { + return + } + n += 2 + if _, err = io.WriteString(w, v); err != nil { + return + } + n += len(v) + } + return +} + +func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynStream + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) + + // Serialize frame to Writer. 
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil { + return err + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return err + } + f.headerBuf.Reset() + return nil +} + +func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynReply + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeHeaders + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength { + return &Error{InvalidDataFrame, frame.StreamId} + } + + // Serialize frame to Writer. 
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data)) + if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil { + return + } + if _, err = f.w.Write(frame.Data); err != nil { + return + } + return nil +} diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go new file mode 100644 index 000000000..f9e9ee267 --- /dev/null +++ b/vendor/github.com/docker/spdystream/stream.go @@ -0,0 +1,327 @@ +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/docker/spdystream/spdy" +) + +var ( + ErrUnreadPartialData = errors.New("unread partial data") +) + +type Stream struct { + streamId spdy.StreamId + parent *Stream + conn *Connection + startChan chan error + + dataLock sync.RWMutex + dataChan chan []byte + unread []byte + + priority uint8 + headers http.Header + headerChan chan http.Header + finishLock sync.Mutex + finished bool + replyCond *sync.Cond + replied bool + closeLock sync.Mutex + closeChan chan bool +} + +// WriteData writes data to stream, sending a dataframe per call +func (s *Stream) WriteData(data []byte, fin bool) error { + s.waitWriteReply() + var flags spdy.DataFlags + + if fin { + flags = spdy.DataFlagFin + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return ErrWriteClosedStream + } + s.finished = true + s.finishLock.Unlock() + } + + dataFrame := &spdy.DataFrame{ + StreamId: s.streamId, + Flags: flags, + Data: data, + } + + debugMessage("(%p) (%d) Writing data frame", s, s.streamId) + return s.conn.framer.WriteFrame(dataFrame) +} + +// Write writes bytes to a stream, calling write data for each call. +func (s *Stream) Write(data []byte) (n int, err error) { + err = s.WriteData(data, false) + if err == nil { + n = len(data) + } + return +} + +// Read reads bytes from a stream, a single read will never get more +// than what is sent on a single data frame, but a multiple calls to +// read may get data from the same data frame. +func (s *Stream) Read(p []byte) (n int, err error) { + if s.unread == nil { + select { + case <-s.closeChan: + return 0, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return 0, io.EOF + } + s.unread = read + } + } + n = copy(p, s.unread) + if n < len(s.unread) { + s.unread = s.unread[n:] + } else { + s.unread = nil + } + return +} + +// ReadData reads an entire data frame and returns the byte array +// from the data frame. If there is unread data from the result +// of a Read call, this function will return an ErrUnreadPartialData. +func (s *Stream) ReadData() ([]byte, error) { + debugMessage("(%p) Reading data from %d", s, s.streamId) + if s.unread != nil { + return nil, ErrUnreadPartialData + } + select { + case <-s.closeChan: + return nil, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return nil, io.EOF + } + return read, nil + } +} + +func (s *Stream) waitWriteReply() { + if s.replyCond != nil { + s.replyCond.L.Lock() + for !s.replied { + s.replyCond.Wait() + } + s.replyCond.L.Unlock() + } +} + +// Wait waits for the stream to receive a reply. +func (s *Stream) Wait() error { + return s.WaitTimeout(time.Duration(0)) +} + +// WaitTimeout waits for the stream to receive a reply or for timeout. +// When the timeout is reached, ErrTimeout will be returned. 
+func (s *Stream) WaitTimeout(timeout time.Duration) error { + var timeoutChan <-chan time.Time + if timeout > time.Duration(0) { + timeoutChan = time.After(timeout) + } + + select { + case err := <-s.startChan: + if err != nil { + return err + } + break + case <-timeoutChan: + return ErrTimeout + } + return nil +} + +// Close closes the stream by sending an empty data frame with the +// finish flag set, indicating this side is finished with the stream. +func (s *Stream) Close() error { + select { + case <-s.closeChan: + // Stream is now fully closed + s.conn.removeStream(s) + default: + break + } + return s.WriteData([]byte{}, true) +} + +// Reset sends a reset frame, putting the stream into the fully closed state. +func (s *Stream) Reset() error { + s.conn.removeStream(s) + return s.resetStream() +} + +func (s *Stream) resetStream() error { + // Always call closeRemoteChannels, even if s.finished is already true. + // This makes it so that stream.Close() followed by stream.Reset() allows + // stream.Read() to unblock. + s.closeRemoteChannels() + + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return nil + } + s.finished = true + s.finishLock.Unlock() + + resetFrame := &spdy.RstStreamFrame{ + StreamId: s.streamId, + Status: spdy.Cancel, + } + return s.conn.framer.WriteFrame(resetFrame) +} + +// CreateSubStream creates a stream using the current as the parent +func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) { + return s.conn.CreateStream(headers, s, fin) +} + +// SetPriority sets the stream priority, does not affect the +// remote priority of this stream after Open has been called. +// Valid values are 0 through 7, 0 being the highest priority +// and 7 the lowest. +func (s *Stream) SetPriority(priority uint8) { + s.priority = priority +} + +// SendHeader sends a header frame across the stream +func (s *Stream) SendHeader(headers http.Header, fin bool) error { + return s.conn.sendHeaders(headers, s, fin) +} + +// SendReply sends a reply on a stream, only valid to be called once +// when handling a new stream +func (s *Stream) SendReply(headers http.Header, fin bool) error { + if s.replyCond == nil { + return errors.New("cannot reply on initiated stream") + } + s.replyCond.L.Lock() + defer s.replyCond.L.Unlock() + if s.replied { + return nil + } + + err := s.conn.sendReply(headers, s, fin) + if err != nil { + return err + } + + s.replied = true + s.replyCond.Broadcast() + return nil +} + +// Refuse sends a reset frame with the status refuse, only +// valid to be called once when handling a new stream. This +// may be used to indicate that a stream is not allowed +// when http status codes are not being used. +func (s *Stream) Refuse() error { + if s.replied { + return nil + } + s.replied = true + return s.conn.sendReset(spdy.RefusedStream, s) +} + +// Cancel sends a reset frame with the status canceled. This +// can be used at any time by the creator of the Stream to +// indicate the stream is no longer needed. +func (s *Stream) Cancel() error { + return s.conn.sendReset(spdy.Cancel, s) +} + +// ReceiveHeader receives a header sent on the other side +// of the stream. This function will block until a header +// is received or stream is closed. 
+func (s *Stream) ReceiveHeader() (http.Header, error) { + select { + case <-s.closeChan: + break + case header, ok := <-s.headerChan: + if !ok { + return nil, fmt.Errorf("header chan closed") + } + return header, nil + } + return nil, fmt.Errorf("stream closed") +} + +// Parent returns the parent stream +func (s *Stream) Parent() *Stream { + return s.parent +} + +// Headers returns the headers used to create the stream +func (s *Stream) Headers() http.Header { + return s.headers +} + +// String returns the string version of stream using the +// streamId to uniquely identify the stream +func (s *Stream) String() string { + return fmt.Sprintf("stream:%d", s.streamId) +} + +// Identifier returns a 32 bit identifier for the stream +func (s *Stream) Identifier() uint32 { + return uint32(s.streamId) +} + +// IsFinished returns whether the stream has finished +// sending data +func (s *Stream) IsFinished() bool { + return s.finished +} + +// Implement net.Conn interface + +func (s *Stream) LocalAddr() net.Addr { + return s.conn.conn.LocalAddr() +} + +func (s *Stream) RemoteAddr() net.Addr { + return s.conn.conn.RemoteAddr() +} + +// TODO set per stream values instead of connection-wide + +func (s *Stream) SetDeadline(t time.Time) error { + return s.conn.conn.SetDeadline(t) +} + +func (s *Stream) SetReadDeadline(t time.Time) error { + return s.conn.conn.SetReadDeadline(t) +} + +func (s *Stream) SetWriteDeadline(t time.Time) error { + return s.conn.conn.SetWriteDeadline(t) +} + +func (s *Stream) closeRemoteChannels() { + s.closeLock.Lock() + defer s.closeLock.Unlock() + select { + case <-s.closeChan: + default: + close(s.closeChan) + } +} diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go new file mode 100644 index 000000000..1b2c199a4 --- /dev/null +++ b/vendor/github.com/docker/spdystream/utils.go @@ -0,0 +1,16 @@ +package spdystream + +import ( + "log" + "os" +) + +var ( + DEBUG = os.Getenv("DEBUG") +) + +func debugMessage(fmt string, args ...interface{}) { + if DEBUG != "" { + log.Printf(fmt, args...) + } +} diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/LICENSE new file mode 100644 index 000000000..ece7ec61e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012,2013 Ernest Micklei + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md new file mode 100644 index 000000000..cd1f2d0cc --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/README.md @@ -0,0 +1,74 @@ +go-restful +========== +package for building REST-style Web Services using Google Go + +[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) +[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) +[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful) + +- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples) + +REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: + +- GET = Retrieve a representation of a resource +- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm. +- PUT = Create if you are sending the full content of the specified resource (URI). +- PUT = Update if you are updating the full content of the specified resource. +- DELETE = Delete if you are requesting the server to delete the resource +- PATCH = Update partial content of a resource +- OPTIONS = Get information about the communication options for the request URI + +### Example + +```Go +ws := new(restful.WebService) +ws. + Path("/users"). + Consumes(restful.MIME_XML, restful.MIME_JSON). + Produces(restful.MIME_JSON, restful.MIME_XML) + +ws.Route(ws.GET("/{user-id}").To(u.findUser). + Doc("get a user"). + Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")). + Writes(User{})) +... + +func (u UserResource) findUser(request *restful.Request, response *restful.Response) { + id := request.PathParameter("user-id") + ... +} +``` + +[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) + +### Features + +- Routes for request → function mapping with path parameter (e.g. {id}) support +- Configurable router: + - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. 
/meetings/{id} or /static/{subpath:*} + - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions +- Request API for reading structs from JSON/XML and accesing parameters (path,query,header) +- Response API for writing structs to JSON/XML and setting headers +- Customizable encoding using EntityReaderWriter registration +- Filters for intercepting the request → response flow on Service or Route level +- Request-scoped variables using attributes +- Containers for WebServices on different HTTP endpoints +- Content encoding (gzip,deflate) of request and response payloads +- Automatic responses on OPTIONS (using a filter) +- Automatic CORS request handling (using a filter) +- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12),[go-restful-openapi](https://github.com/emicklei/go-restful-openapi)) +- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) +- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) +- Configurable (trace) logging +- Customizable gzip/deflate readers and writers using CompressorProvider registration + +### Resources + +- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) +- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) +- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) +- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) + +Type ```git shortlog -s``` for a full list of contributors. + +© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/compress.go new file mode 100644 index 000000000..220b37712 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/compress.go @@ -0,0 +1,123 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bufio" + "compress/gzip" + "compress/zlib" + "errors" + "io" + "net" + "net/http" + "strings" +) + +// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting. 
+var EnableContentEncoding = false + +// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib) +type CompressingResponseWriter struct { + writer http.ResponseWriter + compressor io.WriteCloser + encoding string +} + +// Header is part of http.ResponseWriter interface +func (c *CompressingResponseWriter) Header() http.Header { + return c.writer.Header() +} + +// WriteHeader is part of http.ResponseWriter interface +func (c *CompressingResponseWriter) WriteHeader(status int) { + c.writer.WriteHeader(status) +} + +// Write is part of http.ResponseWriter interface +// It is passed through the compressor +func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) { + if c.isCompressorClosed() { + return -1, errors.New("Compressing error: tried to write data using closed compressor") + } + return c.compressor.Write(bytes) +} + +// CloseNotify is part of http.CloseNotifier interface +func (c *CompressingResponseWriter) CloseNotify() <-chan bool { + return c.writer.(http.CloseNotifier).CloseNotify() +} + +// Close the underlying compressor +func (c *CompressingResponseWriter) Close() error { + if c.isCompressorClosed() { + return errors.New("Compressing error: tried to close already closed compressor") + } + + c.compressor.Close() + if ENCODING_GZIP == c.encoding { + currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer)) + } + if ENCODING_DEFLATE == c.encoding { + currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer)) + } + // gc hint needed? + c.compressor = nil + return nil +} + +func (c *CompressingResponseWriter) isCompressorClosed() bool { + return nil == c.compressor +} + +// Hijack implements the Hijacker interface +// This is especially useful when combining Container.EnabledContentEncoding +// in combination with websockets (for instance gorilla/websocket) +func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker, ok := c.writer.(http.Hijacker) + if !ok { + return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface") + } + return hijacker.Hijack() +} + +// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. 
+func wantsCompressedResponse(httpRequest *http.Request) (bool, string) { + header := httpRequest.Header.Get(HEADER_AcceptEncoding) + gi := strings.Index(header, ENCODING_GZIP) + zi := strings.Index(header, ENCODING_DEFLATE) + // use in order of appearance + if gi == -1 { + return zi != -1, ENCODING_DEFLATE + } else if zi == -1 { + return gi != -1, ENCODING_GZIP + } else { + if gi < zi { + return true, ENCODING_GZIP + } + return true, ENCODING_DEFLATE + } +} + +// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate} +func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) { + httpWriter.Header().Set(HEADER_ContentEncoding, encoding) + c := new(CompressingResponseWriter) + c.writer = httpWriter + var err error + if ENCODING_GZIP == encoding { + w := currentCompressorProvider.AcquireGzipWriter() + w.Reset(httpWriter) + c.compressor = w + c.encoding = ENCODING_GZIP + } else if ENCODING_DEFLATE == encoding { + w := currentCompressorProvider.AcquireZlibWriter() + w.Reset(httpWriter) + c.compressor = w + c.encoding = ENCODING_DEFLATE + } else { + return nil, errors.New("Unknown encoding:" + encoding) + } + return c, err +} diff --git a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/compressor_cache.go new file mode 100644 index 000000000..ee426010a --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/compressor_cache.go @@ -0,0 +1,103 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "compress/gzip" + "compress/zlib" +) + +// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount +// of writers and readers (resources). +// If a new resource is acquired and all are in use, it will return a new unmanaged resource. +type BoundedCachedCompressors struct { + gzipWriters chan *gzip.Writer + gzipReaders chan *gzip.Reader + zlibWriters chan *zlib.Writer + writersCapacity int + readersCapacity int +} + +// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors. +func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors { + b := &BoundedCachedCompressors{ + gzipWriters: make(chan *gzip.Writer, writersCapacity), + gzipReaders: make(chan *gzip.Reader, readersCapacity), + zlibWriters: make(chan *zlib.Writer, writersCapacity), + writersCapacity: writersCapacity, + readersCapacity: readersCapacity, + } + for ix := 0; ix < writersCapacity; ix++ { + b.gzipWriters <- newGzipWriter() + b.zlibWriters <- newZlibWriter() + } + for ix := 0; ix < readersCapacity; ix++ { + b.gzipReaders <- newGzipReader() + } + return b +} + +// AcquireGzipWriter returns an resettable *gzip.Writer. Needs to be released. +func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer { + var writer *gzip.Writer + select { + case writer, _ = <-b.gzipWriters: + default: + // return a new unmanaged one + writer = newGzipWriter() + } + return writer +} + +// ReleaseGzipWriter accepts a writer (does not have to be one that was cached) +// only when the cache has room for it. It will ignore it otherwise. +func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) { + // forget the unmanaged ones + if len(b.gzipWriters) < b.writersCapacity { + b.gzipWriters <- w + } +} + +// AcquireGzipReader returns a *gzip.Reader. 
Needs to be released. +func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader { + var reader *gzip.Reader + select { + case reader, _ = <-b.gzipReaders: + default: + // return a new unmanaged one + reader = newGzipReader() + } + return reader +} + +// ReleaseGzipReader accepts a reader (does not have to be one that was cached) +// only when the cache has room for it. It will ignore it otherwise. +func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) { + // forget the unmanaged ones + if len(b.gzipReaders) < b.readersCapacity { + b.gzipReaders <- r + } +} + +// AcquireZlibWriter returns an resettable *zlib.Writer. Needs to be released. +func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer { + var writer *zlib.Writer + select { + case writer, _ = <-b.zlibWriters: + default: + // return a new unmanaged one + writer = newZlibWriter() + } + return writer +} + +// ReleaseZlibWriter accepts a writer (does not have to be one that was cached) +// only when the cache has room for it. It will ignore it otherwise. +func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) { + // forget the unmanaged ones + if len(b.zlibWriters) < b.writersCapacity { + b.zlibWriters <- w + } +} diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/compressor_pools.go new file mode 100644 index 000000000..d866ce64b --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/compressor_pools.go @@ -0,0 +1,91 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "sync" +) + +// SyncPoolCompessors is a CompressorProvider that use the standard sync.Pool. +type SyncPoolCompessors struct { + GzipWriterPool *sync.Pool + GzipReaderPool *sync.Pool + ZlibWriterPool *sync.Pool +} + +// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors. +func NewSyncPoolCompessors() *SyncPoolCompessors { + return &SyncPoolCompessors{ + GzipWriterPool: &sync.Pool{ + New: func() interface{} { return newGzipWriter() }, + }, + GzipReaderPool: &sync.Pool{ + New: func() interface{} { return newGzipReader() }, + }, + ZlibWriterPool: &sync.Pool{ + New: func() interface{} { return newZlibWriter() }, + }, + } +} + +func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer { + return s.GzipWriterPool.Get().(*gzip.Writer) +} + +func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) { + s.GzipWriterPool.Put(w) +} + +func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader { + return s.GzipReaderPool.Get().(*gzip.Reader) +} + +func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) { + s.GzipReaderPool.Put(r) +} + +func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer { + return s.ZlibWriterPool.Get().(*zlib.Writer) +} + +func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) { + s.ZlibWriterPool.Put(w) +} + +func newGzipWriter() *gzip.Writer { + // create with an empty bytes writer; it will be replaced before using the gzipWriter + writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) + if err != nil { + panic(err.Error()) + } + return writer +} + +func newGzipReader() *gzip.Reader { + // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader + // we can safely use currentCompressProvider because it is set on package initialization. 
+ w := currentCompressorProvider.AcquireGzipWriter() + defer currentCompressorProvider.ReleaseGzipWriter(w) + b := new(bytes.Buffer) + w.Reset(b) + w.Flush() + w.Close() + reader, err := gzip.NewReader(bytes.NewReader(b.Bytes())) + if err != nil { + panic(err.Error()) + } + return reader +} + +func newZlibWriter() *zlib.Writer { + writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) + if err != nil { + panic(err.Error()) + } + return writer +} diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/compressors.go new file mode 100644 index 000000000..cb32f7ef5 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/compressors.go @@ -0,0 +1,54 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "compress/gzip" + "compress/zlib" +) + +// CompressorProvider describes a component that can provider compressors for the std methods. +type CompressorProvider interface { + // Returns a *gzip.Writer which needs to be released later. + // Before using it, call Reset(). + AcquireGzipWriter() *gzip.Writer + + // Releases an aqcuired *gzip.Writer. + ReleaseGzipWriter(w *gzip.Writer) + + // Returns a *gzip.Reader which needs to be released later. + AcquireGzipReader() *gzip.Reader + + // Releases an aqcuired *gzip.Reader. + ReleaseGzipReader(w *gzip.Reader) + + // Returns a *zlib.Writer which needs to be released later. + // Before using it, call Reset(). + AcquireZlibWriter() *zlib.Writer + + // Releases an aqcuired *zlib.Writer. + ReleaseZlibWriter(w *zlib.Writer) +} + +// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip). +var currentCompressorProvider CompressorProvider + +func init() { + currentCompressorProvider = NewSyncPoolCompessors() +} + +// CurrentCompressorProvider returns the current CompressorProvider. +// It is initialized using a SyncPoolCompessors. +func CurrentCompressorProvider() CompressorProvider { + return currentCompressorProvider +} + +// CompressorProvider sets the actual provider of compressors (zlib or gzip). +func SetCompressorProvider(p CompressorProvider) { + if p == nil { + panic("cannot set compressor provider to nil") + } + currentCompressorProvider = p +} diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/constants.go new file mode 100644 index 000000000..203439c5e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/constants.go @@ -0,0 +1,30 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. 
+ +const ( + MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default + + HEADER_Allow = "Allow" + HEADER_Accept = "Accept" + HEADER_Origin = "Origin" + HEADER_ContentType = "Content-Type" + HEADER_LastModified = "Last-Modified" + HEADER_AcceptEncoding = "Accept-Encoding" + HEADER_ContentEncoding = "Content-Encoding" + HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers" + HEADER_AccessControlRequestMethod = "Access-Control-Request-Method" + HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers" + HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods" + HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin" + HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials" + HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers" + HEADER_AccessControlMaxAge = "Access-Control-Max-Age" + + ENCODING_GZIP = "gzip" + ENCODING_DEFLATE = "deflate" +) diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go new file mode 100644 index 000000000..657d5b6dd --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/container.go @@ -0,0 +1,366 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "os" + "runtime" + "strings" + "sync" + + "github.com/emicklei/go-restful/log" +) + +// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. +// The requests are further dispatched to routes of WebServices using a RouteSelector +type Container struct { + webServicesLock sync.RWMutex + webServices []*WebService + ServeMux *http.ServeMux + isRegisteredOnRoot bool + containerFilters []FilterFunction + doNotRecover bool // default is true + recoverHandleFunc RecoverHandleFunction + serviceErrorHandleFunc ServiceErrorHandleFunction + router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative) + contentEncodingEnabled bool // default is false +} + +// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter) +func NewContainer() *Container { + return &Container{ + webServices: []*WebService{}, + ServeMux: http.NewServeMux(), + isRegisteredOnRoot: false, + containerFilters: []FilterFunction{}, + doNotRecover: true, + recoverHandleFunc: logStackOnRecover, + serviceErrorHandleFunc: writeServiceError, + router: CurlyRouter{}, + contentEncodingEnabled: false} +} + +// RecoverHandleFunction declares functions that can be used to handle a panic situation. +// The first argument is what recover() returns. The second must be used to communicate an error response. +type RecoverHandleFunction func(interface{}, http.ResponseWriter) + +// RecoverHandler changes the default function (logStackOnRecover) to be called +// when a panic is detected. DoNotRecover must be have its default value (=false). +func (c *Container) RecoverHandler(handler RecoverHandleFunction) { + c.recoverHandleFunc = handler +} + +// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation. 
+// The first argument is the service error, the second is the request that resulted in the error and +// the third must be used to communicate an error response. +type ServiceErrorHandleFunction func(ServiceError, *Request, *Response) + +// ServiceErrorHandler changes the default function (writeServiceError) to be called +// when a ServiceError is detected. +func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) { + c.serviceErrorHandleFunc = handler +} + +// DoNotRecover controls whether panics will be caught to return HTTP 500. +// If set to true, Route functions are responsible for handling any error situation. +// Default value is true. +func (c *Container) DoNotRecover(doNot bool) { + c.doNotRecover = doNot +} + +// Router changes the default Router (currently CurlyRouter) +func (c *Container) Router(aRouter RouteSelector) { + c.router = aRouter +} + +// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. +func (c *Container) EnableContentEncoding(enabled bool) { + c.contentEncodingEnabled = enabled +} + +// Add a WebService to the Container. It will detect duplicate root paths and exit in that case. +func (c *Container) Add(service *WebService) *Container { + c.webServicesLock.Lock() + defer c.webServicesLock.Unlock() + + // if rootPath was not set then lazy initialize it + if len(service.rootPath) == 0 { + service.Path("/") + } + + // cannot have duplicate root paths + for _, each := range c.webServices { + if each.RootPath() == service.RootPath() { + log.Printf("[restful] WebService with duplicate root path detected:['%v']", each) + os.Exit(1) + } + } + + // If not registered on root then add specific mapping + if !c.isRegisteredOnRoot { + c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux) + } + c.webServices = append(c.webServices, service) + return c +} + +// addHandler may set a new HandleFunc for the serveMux +// this function must run inside the critical region protected by the webServicesLock. 
+// returns true if the function was registered on root ("/") +func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool { + pattern := fixedPrefixPath(service.RootPath()) + // check if root path registration is needed + if "/" == pattern || "" == pattern { + serveMux.HandleFunc("/", c.dispatch) + return true + } + // detect if registration already exists + alreadyMapped := false + for _, each := range c.webServices { + if each.RootPath() == service.RootPath() { + alreadyMapped = true + break + } + } + if !alreadyMapped { + serveMux.HandleFunc(pattern, c.dispatch) + if !strings.HasSuffix(pattern, "/") { + serveMux.HandleFunc(pattern+"/", c.dispatch) + } + } + return false +} + +func (c *Container) Remove(ws *WebService) error { + if c.ServeMux == http.DefaultServeMux { + errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) + log.Printf(errMsg) + return errors.New(errMsg) + } + c.webServicesLock.Lock() + defer c.webServicesLock.Unlock() + // build a new ServeMux and re-register all WebServices + newServeMux := http.NewServeMux() + newServices := []*WebService{} + newIsRegisteredOnRoot := false + for _, each := range c.webServices { + if each.rootPath != ws.rootPath { + // If not registered on root then add specific mapping + if !newIsRegisteredOnRoot { + newIsRegisteredOnRoot = c.addHandler(each, newServeMux) + } + newServices = append(newServices, each) + } + } + c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot + return nil +} + +// logStackOnRecover is the default RecoverHandleFunction and is called +// when DoNotRecover is false and the recoverHandleFunc is not set for the container. +// Default implementation logs the stacktrace and writes the stacktrace on the response. +// This may be a security issue as it exposes sourcecode information. +func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { + var buffer bytes.Buffer + buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason)) + for i := 2; ; i += 1 { + _, file, line, ok := runtime.Caller(i) + if !ok { + break + } + buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) + } + log.Print(buffer.String()) + httpWriter.WriteHeader(http.StatusInternalServerError) + httpWriter.Write(buffer.Bytes()) +} + +// writeServiceError is the default ServiceErrorHandleFunction and is called +// when a ServiceError is returned during route selection. Default implementation +// calls resp.WriteErrorString(err.Code, err.Message) +func writeServiceError(err ServiceError, req *Request, resp *Response) { + resp.WriteErrorString(err.Code, err.Message) +} + +// Dispatch the incoming Http Request to a matching WebService. +func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { + if httpWriter == nil { + panic("httpWriter cannot be nil") + } + if httpRequest == nil { + panic("httpRequest cannot be nil") + } + c.dispatch(httpWriter, httpRequest) +} + +// Dispatch the incoming Http Request to a matching WebService. 
+func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { + writer := httpWriter + + // CompressingResponseWriter should be closed after all operations are done + defer func() { + if compressWriter, ok := writer.(*CompressingResponseWriter); ok { + compressWriter.Close() + } + }() + + // Instal panic recovery unless told otherwise + if !c.doNotRecover { // catch all for 500 response + defer func() { + if r := recover(); r != nil { + c.recoverHandleFunc(r, writer) + return + } + }() + } + + // Detect if compression is needed + // assume without compression, test for override + if c.contentEncodingEnabled { + doCompress, encoding := wantsCompressedResponse(httpRequest) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("[restful] unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + } + // Find best match Route ; err is non nil if no match was found + var webService *WebService + var route *Route + var err error + func() { + c.webServicesLock.RLock() + defer c.webServicesLock.RUnlock() + webService, route, err = c.router.SelectRoute( + c.webServices, + httpRequest) + }() + if err != nil { + // a non-200 response has already been written + // run container filters anyway ; they should not touch the response... + chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { + switch err.(type) { + case ServiceError: + ser := err.(ServiceError) + c.serviceErrorHandleFunc(ser, req, resp) + } + // TODO + }} + chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) + return + } + wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest) + // pass through filters (if any) + if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 { + // compose filter chain + allFilters := []FilterFunction{} + allFilters = append(allFilters, c.containerFilters...) + allFilters = append(allFilters, webService.filters...) + allFilters = append(allFilters, route.Filters...) + chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) { + // handle request by route after passing all filters + route.Function(wrappedRequest, wrappedResponse) + }} + chain.ProcessFilter(wrappedRequest, wrappedResponse) + } else { + // no filters, handle request by route + route.Function(wrappedRequest, wrappedResponse) + } +} + +// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {} +func fixedPrefixPath(pathspec string) string { + varBegin := strings.Index(pathspec, "{") + if -1 == varBegin { + return pathspec + } + return pathspec[:varBegin] +} + +// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server +func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) { + c.ServeMux.ServeHTTP(httpwriter, httpRequest) +} + +// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics. +func (c *Container) Handle(pattern string, handler http.Handler) { + c.ServeMux.Handle(pattern, handler) +} + +// HandleWithFilter registers the handler for the given pattern. +// Container's filter chain is applied for handler. +// If a handler already exists for pattern, HandleWithFilter panics. 
+func (c *Container) HandleWithFilter(pattern string, handler http.Handler) { + f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) { + if len(c.containerFilters) == 0 { + handler.ServeHTTP(httpResponse, httpRequest) + return + } + + chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { + handler.ServeHTTP(httpResponse, httpRequest) + }} + chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse)) + } + + c.Handle(pattern, http.HandlerFunc(f)) +} + +// Filter appends a container FilterFunction. These are called before dispatching +// a http.Request to a WebService from the container +func (c *Container) Filter(filter FilterFunction) { + c.containerFilters = append(c.containerFilters, filter) +} + +// RegisteredWebServices returns the collections of added WebServices +func (c *Container) RegisteredWebServices() []*WebService { + c.webServicesLock.RLock() + defer c.webServicesLock.RUnlock() + result := make([]*WebService, len(c.webServices)) + for ix := range c.webServices { + result[ix] = c.webServices[ix] + } + return result +} + +// computeAllowedMethods returns a list of HTTP methods that are valid for a Request +func (c *Container) computeAllowedMethods(req *Request) []string { + // Go through all RegisteredWebServices() and all its Routes to collect the options + methods := []string{} + requestPath := req.Request.URL.Path + for _, ws := range c.RegisteredWebServices() { + matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath) + if matches != nil { + finalMatch := matches[len(matches)-1] + for _, rt := range ws.Routes() { + matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch) + if matches != nil { + lastMatch := matches[len(matches)-1] + if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. + methods = append(methods, rt.Method) + } + } + } + } + } + // methods = append(methods, "OPTIONS") not sure about this + return methods +} + +// newBasicRequestResponse creates a pair of Request,Response from its http versions. +// It is basic because no parameter or (produces) content-type information is given. +func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { + resp := NewResponse(httpWriter) + resp.requestAccept = httpRequest.Header.Get(HEADER_Accept) + return NewRequest(httpRequest), resp +} diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/cors_filter.go new file mode 100644 index 000000000..1efeef072 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/cors_filter.go @@ -0,0 +1,202 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "regexp" + "strconv" + "strings" +) + +// CrossOriginResourceSharing is used to create a Container Filter that implements CORS. +// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page +// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from. 
+// +// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing +// http://enable-cors.org/server.html +// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request +type CrossOriginResourceSharing struct { + ExposeHeaders []string // list of Header names + AllowedHeaders []string // list of Header names + AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed. + AllowedMethods []string + MaxAge int // number of seconds before requiring new Options request + CookiesAllowed bool + Container *Container + + allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check. +} + +// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html +// and http://www.html5rocks.com/static/images/cors_server_flowchart.png +func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) { + origin := req.Request.Header.Get(HEADER_Origin) + if len(origin) == 0 { + if trace { + traceLogger.Print("no Http header Origin set") + } + chain.ProcessFilter(req, resp) + return + } + if !c.isOriginAllowed(origin) { // check whether this origin is allowed + if trace { + traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns) + } + chain.ProcessFilter(req, resp) + return + } + if req.Request.Method != "OPTIONS" { + c.doActualRequest(req, resp) + chain.ProcessFilter(req, resp) + return + } + if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" { + c.doPreflightRequest(req, resp) + } else { + c.doActualRequest(req, resp) + chain.ProcessFilter(req, resp) + return + } +} + +func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) { + c.setOptionsHeaders(req, resp) + // continue processing the response +} + +func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) { + if len(c.AllowedMethods) == 0 { + if c.Container == nil { + c.AllowedMethods = DefaultContainer.computeAllowedMethods(req) + } else { + c.AllowedMethods = c.Container.computeAllowedMethods(req) + } + } + + acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod) + if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) { + if trace { + traceLogger.Printf("Http header %s:%s is not in %v", + HEADER_AccessControlRequestMethod, + acrm, + c.AllowedMethods) + } + return + } + acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) + if len(acrhs) > 0 { + for _, each := range strings.Split(acrhs, ",") { + if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) { + if trace { + traceLogger.Printf("Http header %s:%s is not in %v", + HEADER_AccessControlRequestHeaders, + acrhs, + c.AllowedHeaders) + } + return + } + } + } + resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ",")) + resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs) + c.setOptionsHeaders(req, resp) + + // return http 200 response, no body +} + +func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) { + c.checkAndSetExposeHeaders(resp) + c.setAllowOriginHeader(req, resp) + c.checkAndSetAllowCredentials(resp) + if c.MaxAge > 0 { + resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge)) + } +} + +func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { + if len(origin) == 0 { + return 
false + } + if len(c.AllowedDomains) == 0 { + return true + } + + allowed := false + for _, domain := range c.AllowedDomains { + if domain == origin { + allowed = true + break + } + } + + if !allowed { + if len(c.allowedOriginPatterns) == 0 { + // compile allowed domains to allowed origin patterns + allowedOriginRegexps, err := compileRegexps(c.AllowedDomains) + if err != nil { + return false + } + c.allowedOriginPatterns = allowedOriginRegexps + } + + for _, pattern := range c.allowedOriginPatterns { + if allowed = pattern.MatchString(origin); allowed { + break + } + } + } + + return allowed +} + +func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { + origin := req.Request.Header.Get(HEADER_Origin) + if c.isOriginAllowed(origin) { + resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) + } +} + +func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) { + if len(c.ExposeHeaders) > 0 { + resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ",")) + } +} + +func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) { + if c.CookiesAllowed { + resp.AddHeader(HEADER_AccessControlAllowCredentials, "true") + } +} + +func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool { + for _, each := range allowedMethods { + if each == method { + return true + } + } + return false +} + +func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool { + for _, each := range c.AllowedHeaders { + if strings.ToLower(each) == strings.ToLower(header) { + return true + } + } + return false +} + +// Take a list of strings and compile them into a list of regular expressions. +func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { + regexps := []*regexp.Regexp{} + for _, regexpStr := range regexpStrings { + r, err := regexp.Compile(regexpStr) + if err != nil { + return regexps, err + } + regexps = append(regexps, r) + } + return regexps, nil +} diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go new file mode 100644 index 000000000..79f1f5aa2 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/curly.go @@ -0,0 +1,164 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "net/http" + "regexp" + "sort" + "strings" +) + +// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets. +type CurlyRouter struct{} + +// SelectRoute is part of the Router interface and returns the best match +// for the WebService and its Route for the given Request. 
+func (c CurlyRouter) SelectRoute( + webServices []*WebService, + httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) { + + requestTokens := tokenizePath(httpRequest.URL.Path) + + detectedService := c.detectWebService(requestTokens, webServices) + if detectedService == nil { + if trace { + traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path) + } + return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found") + } + candidateRoutes := c.selectRoutes(detectedService, requestTokens) + if len(candidateRoutes) == 0 { + if trace { + traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path) + } + return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found") + } + selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest) + if selectedRoute == nil { + return detectedService, nil, err + } + return detectedService, selectedRoute, nil +} + +// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. +func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { + candidates := sortableCurlyRoutes{} + for _, each := range ws.routes { + matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) + if matches { + candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? + } + } + sort.Sort(sort.Reverse(candidates)) + return candidates +} + +// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are. +func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) { + if len(routeTokens) < len(requestTokens) { + // proceed in matching only if last routeToken is wildcard + count := len(routeTokens) + if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") { + return false, 0, 0 + } + // proceed + } + for i, routeToken := range routeTokens { + if i == len(requestTokens) { + // reached end of request path + return false, 0, 0 + } + requestToken := requestTokens[i] + if strings.HasPrefix(routeToken, "{") { + paramCount++ + if colon := strings.Index(routeToken, ":"); colon != -1 { + // match by regex + matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken) + if !matchesToken { + return false, 0, 0 + } + if matchesRemainder { + break + } + } + } else { // no { prefix + if requestToken != routeToken { + return false, 0, 0 + } + staticCount++ + } + } + return true, paramCount, staticCount +} + +// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens +// format routeToken is {someVar:someExpression}, e.g. 
{zipcode:[\d][\d][\d][\d][A-Z][A-Z]} +func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) { + regPart := routeToken[colon+1 : len(routeToken)-1] + if regPart == "*" { + if trace { + traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken) + } + return true, true + } + matched, err := regexp.MatchString(regPart, requestToken) + return (matched && err == nil), false +} + +var jsr311Router = RouterJSR311{} + +// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type +// headers of the Request. See also RouterJSR311 in jsr311.go +func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) { + // tracing is done inside detectRoute + return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest) +} + +// detectWebService returns the best matching webService given the list of path tokens. +// see also computeWebserviceScore +func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { + var best *WebService + score := -1 + for _, each := range webServices { + matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) + if matches && (eachScore > score) { + best = each + score = eachScore + } + } + return best +} + +// computeWebserviceScore returns whether tokens match and +// the weighted score of the longest matching consecutive tokens from the beginning. +func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { + if len(tokens) > len(requestTokens) { + return false, 0 + } + score := 0 + for i := 0; i < len(tokens); i++ { + each := requestTokens[i] + other := tokens[i] + if len(each) == 0 && len(other) == 0 { + score++ + continue + } + if len(other) > 0 && strings.HasPrefix(other, "{") { + // no empty match + if len(each) == 0 { + return false, score + } + score += 1 + } else { + // not a parameter + if each != other { + return false, score + } + score += (len(tokens) - i) * 10 //fuzzy + } + } + return true, score +} diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go new file mode 100644 index 000000000..296f94650 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/curly_route.go @@ -0,0 +1,52 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// curlyRoute exits for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements. 
+type curlyRoute struct { + route Route + paramCount int + staticCount int +} + +type sortableCurlyRoutes []curlyRoute + +func (s *sortableCurlyRoutes) add(route curlyRoute) { + *s = append(*s, route) +} + +func (s sortableCurlyRoutes) routes() (routes []Route) { + for _, each := range s { + routes = append(routes, each.route) // TODO change return type + } + return routes +} + +func (s sortableCurlyRoutes) Len() int { + return len(s) +} +func (s sortableCurlyRoutes) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s sortableCurlyRoutes) Less(i, j int) bool { + ci := s[i] + cj := s[j] + + // primary key + if ci.staticCount < cj.staticCount { + return true + } + if ci.staticCount > cj.staticCount { + return false + } + // secundary key + if ci.paramCount < cj.paramCount { + return true + } + if ci.paramCount > cj.paramCount { + return false + } + return ci.route.Path < cj.route.Path +} diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/doc.go new file mode 100644 index 000000000..f7c16b01f --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/doc.go @@ -0,0 +1,185 @@ +/* +Package restful , a lean package for creating REST-style WebServices without magic. + +WebServices and Routes + +A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls. +Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes. +WebServices must be added to a container (see below) in order to handler Http requests from a server. + +A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept). +This package has the logic to find the best matching Route and if found, call its Function. + + ws := new(restful.WebService) + ws. + Path("/users"). + Consumes(restful.MIME_JSON, restful.MIME_XML). + Produces(restful.MIME_JSON, restful.MIME_XML) + + ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource + + ... + + // GET http://localhost:8080/users/1 + func (u UserResource) findUser(request *restful.Request, response *restful.Response) { + id := request.PathParameter("user-id") + ... + } + +The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. + +See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation. + +Regular expression matching Routes + +A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path. +For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters. +Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax) +This feature requires the use of a CurlyRouter. + +Containers + +A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests. +Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container. +The Default container of go-restful uses the http.DefaultServeMux. +You can create your own Container and create a new http.Server for that particular container. 
+ + container := restful.NewContainer() + server := &http.Server{Addr: ":8081", Handler: container} + +Filters + +A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses. +You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc. +In the restful package there are three hooks into the request,response flow where filters can be added. +Each filter must define a FilterFunction: + + func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain) + +Use the following statement to pass the request,response pair to the next filter or RouteFunction + + chain.ProcessFilter(req, resp) + +Container Filters + +These are processed before any registered WebService. + + // install a (global) filter for the default container (processed before any webservice) + restful.Filter(globalLogging) + +WebService Filters + +These are processed before any Route of a WebService. + + // install a webservice filter (processed before any route) + ws.Filter(webserviceLogging).Filter(measureTime) + + +Route Filters + +These are processed before calling the function associated with the Route. + + // install 2 chained route filters (processed before calling findUser) + ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) + +See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations. + +Response Encoding + +Two encodings are supported: gzip and deflate. To enable this for all responses: + + restful.DefaultContainer.EnableContentEncoding(true) + +If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. +Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. + +See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go + +OPTIONS support + +By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request. + + Filter(OPTIONSFilter()) + +CORS + +By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests. + + cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} + Filter(cors.Filter) + +Error Handling + +Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why. +For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation. + + 400: Bad Request + +If path or query parameters are not valid (content or type) then use http.StatusBadRequest. + + 404: Not Found + +Despite a valid URI, the resource requested may not be available + + 500: Internal Server Error + +If the application logic could not process the request (or write the response) then use http.StatusInternalServerError. + + 405: Method Not Allowed + +The request has a valid URL but the method (GET,PUT,POST,...) is not allowed. + + 406: Not Acceptable + +The request does not have or has an unknown Accept Header set for this operation. + + 415: Unsupported Media Type + +The request does not have or has an unknown Content-Type Header set for this operation. 
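+
+For example (an illustrative sketch only, reusing the hypothetical UserResource from the earlier example), a RouteFunction can report such a failure with:
+
+    func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+        // lookup failed; respond with the appropriate status code and a reason
+        response.WriteErrorString(http.StatusNotFound, "User could not be found.")
+    }
+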
+ +ServiceError + +In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response. + +Performance options + +This package has several options that affect the performance of your service. It is important to understand them and how you can change it. + + restful.DefaultContainer.DoNotRecover(false) + +DoNotRecover controls whether panics will be caught to return HTTP 500. +If set to false, the container will recover from panics. +Default value is true + + restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) + +If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool. +Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation. + +Trouble shooting + +This package has the means to produce detail logging of the complete Http request matching process and filter invocation. +Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as: + + restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)) + +Logging + +The restful.SetLogger() method allows you to override the logger used by the package. By default restful +uses the standard library `log` package and logs to stdout. Different logging packages are supported as +long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your +preferred package is simple. + +Resources + +[project]: https://github.com/emicklei/go-restful + +[examples]: https://github.com/emicklei/go-restful/blob/master/examples + +[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ + +[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape + +(c) 2012-2015, http://ernestmicklei.com. MIT License +*/ +package restful diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go new file mode 100644 index 000000000..6ecf6c7f8 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/entity_accessors.go @@ -0,0 +1,163 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "encoding/json" + "encoding/xml" + "strings" + "sync" +) + +// EntityReaderWriter can read and write values using an encoding such as JSON,XML. +type EntityReaderWriter interface { + // Read a serialized version of the value from the request. + // The Request may have a decompressing reader. Depends on Content-Encoding. + Read(req *Request, v interface{}) error + + // Write a serialized version of the value on the response. + // The Response may have a compressing writer. Depends on Accept-Encoding. 
+ // status should be a valid Http Status code + Write(resp *Response, status int, v interface{}) error +} + +// entityAccessRegistry is a singleton +var entityAccessRegistry = &entityReaderWriters{ + protection: new(sync.RWMutex), + accessors: map[string]EntityReaderWriter{}, +} + +// entityReaderWriters associates MIME to an EntityReaderWriter +type entityReaderWriters struct { + protection *sync.RWMutex + accessors map[string]EntityReaderWriter +} + +func init() { + RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON)) + RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML)) +} + +// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type. +func RegisterEntityAccessor(mime string, erw EntityReaderWriter) { + entityAccessRegistry.protection.Lock() + defer entityAccessRegistry.protection.Unlock() + entityAccessRegistry.accessors[mime] = erw +} + +// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content. +// This package is already initialized with such an accessor using the MIME_JSON contentType. +func NewEntityAccessorJSON(contentType string) EntityReaderWriter { + return entityJSONAccess{ContentType: contentType} +} + +// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content. +// This package is already initialized with such an accessor using the MIME_XML contentType. +func NewEntityAccessorXML(contentType string) EntityReaderWriter { + return entityXMLAccess{ContentType: contentType} +} + +// accessorAt returns the registered ReaderWriter for this MIME type. +func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) { + r.protection.RLock() + defer r.protection.RUnlock() + er, ok := r.accessors[mime] + if !ok { + // retry with reverse lookup + // more expensive but we are in an exceptional situation anyway + for k, v := range r.accessors { + if strings.Contains(mime, k) { + return v, true + } + } + } + return er, ok +} + +// entityXMLAccess is a EntityReaderWriter for XML encoding +type entityXMLAccess struct { + // This is used for setting the Content-Type header when writing + ContentType string +} + +// Read unmarshalls the value from XML +func (e entityXMLAccess) Read(req *Request, v interface{}) error { + return xml.NewDecoder(req.Request.Body).Decode(v) +} + +// Write marshalls the value to JSON and set the Content-Type Header. +func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error { + return writeXML(resp, status, e.ContentType, v) +} + +// writeXML marshalls the value to JSON and set the Content-Type Header. 
+func writeXML(resp *Response, status int, contentType string, v interface{}) error { + if v == nil { + resp.WriteHeader(status) + // do not write a nil representation + return nil + } + if resp.prettyPrint { + // pretty output must be created and written explicitly + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + return err + } + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + _, err = resp.Write([]byte(xml.Header)) + if err != nil { + return err + } + _, err = resp.Write(output) + return err + } + // not-so-pretty + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + return xml.NewEncoder(resp).Encode(v) +} + +// entityJSONAccess is a EntityReaderWriter for JSON encoding +type entityJSONAccess struct { + // This is used for setting the Content-Type header when writing + ContentType string +} + +// Read unmarshalls the value from JSON +func (e entityJSONAccess) Read(req *Request, v interface{}) error { + decoder := json.NewDecoder(req.Request.Body) + decoder.UseNumber() + return decoder.Decode(v) +} + +// Write marshalls the value to JSON and set the Content-Type Header. +func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error { + return writeJSON(resp, status, e.ContentType, v) +} + +// write marshalls the value to JSON and set the Content-Type Header. +func writeJSON(resp *Response, status int, contentType string, v interface{}) error { + if v == nil { + resp.WriteHeader(status) + // do not write a nil representation + return nil + } + if resp.prettyPrint { + // pretty output must be created and written explicitly + output, err := json.MarshalIndent(v, " ", " ") + if err != nil { + return err + } + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + _, err = resp.Write(output) + return err + } + // not-so-pretty + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + return json.NewEncoder(resp).Encode(v) +} diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/filter.go new file mode 100644 index 000000000..c23bfb591 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/filter.go @@ -0,0 +1,35 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. +type FilterChain struct { + Filters []FilterFunction // ordered list of FilterFunction + Index int // index into filters that is currently in progress + Target RouteFunction // function to call after passing all filters +} + +// ProcessFilter passes the request,response pair through the next of Filters. +// Each filter can decide to proceed to the next Filter or handle the Response itself. 
+func (f *FilterChain) ProcessFilter(request *Request, response *Response) { + if f.Index < len(f.Filters) { + f.Index++ + f.Filters[f.Index-1](request, response, f) + } else { + f.Target(request, response) + } +} + +// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction +type FilterFunction func(*Request, *Response, *FilterChain) + +// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching +// See examples/restful-no-cache-filter.go for usage +func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) { + resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1. + resp.Header().Set("Pragma", "no-cache") // HTTP 1.0. + resp.Header().Set("Expires", "0") // Proxies. + chain.ProcessFilter(req, resp) +} diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go new file mode 100644 index 000000000..511444ac6 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/jsr311.go @@ -0,0 +1,248 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "errors" + "fmt" + "net/http" + "sort" +) + +// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions) +// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html. +// RouterJSR311 implements the Router interface. +// Concept of locators is not implemented. +type RouterJSR311 struct{} + +// SelectRoute is part of the Router interface and returns the best match +// for the WebService and its Route for the given Request. 
+func (r RouterJSR311) SelectRoute( + webServices []*WebService, + httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) { + + // Identify the root resource class (WebService) + dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices) + if err != nil { + return nil, nil, NewError(http.StatusNotFound, "") + } + // Obtain the set of candidate methods (Routes) + routes := r.selectRoutes(dispatcher, finalMatch) + if len(routes) == 0 { + return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found") + } + + // Identify the method (Route) that will handle the request + route, ok := r.detectRoute(routes, httpRequest) + return dispatcher, route, ok +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { + // http method + methodOk := []Route{} + for _, each := range routes { + if httpRequest.Method == each.Method { + methodOk = append(methodOk, each) + } + } + if len(methodOk) == 0 { + if trace { + traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method) + } + return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") + } + inputMediaOk := methodOk + + // content-type + contentType := httpRequest.Header.Get(HEADER_ContentType) + inputMediaOk = []Route{} + for _, each := range methodOk { + if each.matchesContentType(contentType) { + inputMediaOk = append(inputMediaOk, each) + } + } + if len(inputMediaOk) == 0 { + if trace { + traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType) + } + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") + } + + // accept + outputMediaOk := []Route{} + accept := httpRequest.Header.Get(HEADER_Accept) + if len(accept) == 0 { + accept = "*/*" + } + for _, each := range inputMediaOk { + if each.matchesAccept(accept) { + outputMediaOk = append(outputMediaOk, each) + } + } + if len(outputMediaOk) == 0 { + if trace { + traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept) + } + return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") + } + // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil + return &outputMediaOk[0], nil +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +// n/m > n/* > */* +func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route { + // TODO + return &routes[0] +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2) +func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route { + filtered := &sortableRouteCandidates{} + for _, each := range dispatcher.Routes() { + pathExpr := each.pathExpr + matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder) + if matches != nil { + lastMatch := matches[len(matches)-1] + if len(lastMatch) == 0 || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. 
+ filtered.candidates = append(filtered.candidates, + routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount}) + } + } + } + if len(filtered.candidates) == 0 { + if trace { + traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder) + } + return []Route{} + } + sort.Sort(sort.Reverse(filtered)) + + // select other routes from candidates whoes expression matches rmatch + matchingRoutes := []Route{filtered.candidates[0].route} + for c := 1; c < len(filtered.candidates); c++ { + each := filtered.candidates[c] + if each.route.pathExpr.Matcher.MatchString(pathRemainder) { + matchingRoutes = append(matchingRoutes, each.route) + } + } + return matchingRoutes +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1) +func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) { + filtered := &sortableDispatcherCandidates{} + for _, each := range dispatchers { + matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath) + if matches != nil { + filtered.candidates = append(filtered.candidates, + dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount}) + } + } + if len(filtered.candidates) == 0 { + if trace { + traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath) + } + return nil, "", errors.New("not found") + } + sort.Sort(sort.Reverse(filtered)) + return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil +} + +// Types and functions to support the sorting of Routes + +type routeCandidate struct { + route Route + matchesCount int // the number of capturing groups + literalCount int // the number of literal characters (means those not resulting from template variable substitution) + nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. 
not ‘([^ /]+?)’) +} + +func (r routeCandidate) expressionToMatch() string { + return r.route.pathExpr.Source +} + +func (r routeCandidate) String() string { + return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount) +} + +type sortableRouteCandidates struct { + candidates []routeCandidate +} + +func (rcs *sortableRouteCandidates) Len() int { + return len(rcs.candidates) +} +func (rcs *sortableRouteCandidates) Swap(i, j int) { + rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i] +} +func (rcs *sortableRouteCandidates) Less(i, j int) bool { + ci := rcs.candidates[i] + cj := rcs.candidates[j] + // primary key + if ci.literalCount < cj.literalCount { + return true + } + if ci.literalCount > cj.literalCount { + return false + } + // secundary key + if ci.matchesCount < cj.matchesCount { + return true + } + if ci.matchesCount > cj.matchesCount { + return false + } + // tertiary key + if ci.nonDefaultCount < cj.nonDefaultCount { + return true + } + if ci.nonDefaultCount > cj.nonDefaultCount { + return false + } + // quaternary key ("source" is interpreted as Path) + return ci.route.Path < cj.route.Path +} + +// Types and functions to support the sorting of Dispatchers + +type dispatcherCandidate struct { + dispatcher *WebService + finalMatch string + matchesCount int // the number of capturing groups + literalCount int // the number of literal characters (means those not resulting from template variable substitution) + nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’) +} +type sortableDispatcherCandidates struct { + candidates []dispatcherCandidate +} + +func (dc *sortableDispatcherCandidates) Len() int { + return len(dc.candidates) +} +func (dc *sortableDispatcherCandidates) Swap(i, j int) { + dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i] +} +func (dc *sortableDispatcherCandidates) Less(i, j int) bool { + ci := dc.candidates[i] + cj := dc.candidates[j] + // primary key + if ci.matchesCount < cj.matchesCount { + return true + } + if ci.matchesCount > cj.matchesCount { + return false + } + // secundary key + if ci.literalCount < cj.literalCount { + return true + } + if ci.literalCount > cj.literalCount { + return false + } + // tertiary key + return ci.nonDefaultCount < cj.nonDefaultCount +} diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/log/log.go new file mode 100644 index 000000000..6cd44c7a5 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/log/log.go @@ -0,0 +1,34 @@ +package log + +import ( + stdlog "log" + "os" +) + +// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger +type StdLogger interface { + Print(v ...interface{}) + Printf(format string, v ...interface{}) +} + +var Logger StdLogger + +func init() { + // default Logger + SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile)) +} + +// SetLogger sets the logger for this package +func SetLogger(customLogger StdLogger) { + Logger = customLogger +} + +// Print delegates to the Logger +func Print(v ...interface{}) { + Logger.Print(v...) +} + +// Printf delegates to the Logger +func Printf(format string, v ...interface{}) { + Logger.Printf(format, v...) 
+} diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/logger.go new file mode 100644 index 000000000..3f1c4db86 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/logger.go @@ -0,0 +1,32 @@ +package restful + +// Copyright 2014 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. +import ( + "github.com/emicklei/go-restful/log" +) + +var trace bool = false +var traceLogger log.StdLogger + +func init() { + traceLogger = log.Logger // use the package logger by default +} + +// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set. +// You may call EnableTracing() directly to enable trace logging to the package-wide logger. +func TraceLogger(logger log.StdLogger) { + traceLogger = logger + EnableTracing(logger != nil) +} + +// expose the setter for the global logger on the top-level package +func SetLogger(customLogger log.StdLogger) { + log.SetLogger(customLogger) +} + +// EnableTracing can be used to Trace logging on and off. +func EnableTracing(enabled bool) { + trace = enabled +} diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/mime.go new file mode 100644 index 000000000..d7ea2b615 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/mime.go @@ -0,0 +1,45 @@ +package restful + +import ( + "strconv" + "strings" +) + +type mime struct { + media string + quality float64 +} + +// insertMime adds a mime to a list and keeps it sorted by quality. +func insertMime(l []mime, e mime) []mime { + for i, each := range l { + // if current mime has lower quality then insert before + if e.quality > each.quality { + left := append([]mime{}, l[0:i]...) + return append(append(left, e), l[i:]...) + } + } + return append(l, e) +} + +// sortedMimes returns a list of mime sorted (desc) by its specified quality. +func sortedMimes(accept string) (sorted []mime) { + for _, each := range strings.Split(accept, ",") { + typeAndQuality := strings.Split(strings.Trim(each, " "), ";") + if len(typeAndQuality) == 1 { + sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) + } else { + // take factor + parts := strings.Split(typeAndQuality[1], "=") + if len(parts) == 2 { + f, err := strconv.ParseFloat(parts[1], 64) + if err != nil { + traceLogger.Printf("unable to parse quality in %s, %v", each, err) + } else { + sorted = insertMime(sorted, mime{typeAndQuality[0], f}) + } + } + } + } + return +} diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go new file mode 100644 index 000000000..4514eadcf --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/options_filter.go @@ -0,0 +1,26 @@ +package restful + +import "strings" + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method +// and provides the response with a set of allowed methods for the request URL Path. +// As for any filter, you can also install it for a particular WebService within a Container. +// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). 
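+// Illustrative usage (sketch; the container variable name "myContainer" is only an example):
+//
+//	myContainer.Filter(myContainer.OPTIONSFilter)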
+func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) { + if "OPTIONS" != req.Request.Method { + chain.ProcessFilter(req, resp) + return + } + resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ",")) +} + +// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method +// and provides the response with a set of allowed methods for the request URL Path. +// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). +func OPTIONSFilter() FilterFunction { + return DefaultContainer.OPTIONSFilter +} diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go new file mode 100644 index 000000000..e11c8162a --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/parameter.go @@ -0,0 +1,114 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +const ( + // PathParameterKind = indicator of Request parameter type "path" + PathParameterKind = iota + + // QueryParameterKind = indicator of Request parameter type "query" + QueryParameterKind + + // BodyParameterKind = indicator of Request parameter type "body" + BodyParameterKind + + // HeaderParameterKind = indicator of Request parameter type "header" + HeaderParameterKind + + // FormParameterKind = indicator of Request parameter type "form" + FormParameterKind +) + +// Parameter is for documententing the parameter used in a Http Request +// ParameterData kinds are Path,Query and Body +type Parameter struct { + data *ParameterData +} + +// ParameterData represents the state of a Parameter. +// It is made public to make it accessible to e.g. the Swagger package. 
+type ParameterData struct { + Name, Description, DataType, DataFormat string + Kind int + Required bool + AllowableValues map[string]string + AllowMultiple bool + DefaultValue string +} + +// Data returns the state of the Parameter +func (p *Parameter) Data() ParameterData { + return *p.data +} + +// Kind returns the parameter type indicator (see const for valid values) +func (p *Parameter) Kind() int { + return p.data.Kind +} + +func (p *Parameter) bePath() *Parameter { + p.data.Kind = PathParameterKind + return p +} +func (p *Parameter) beQuery() *Parameter { + p.data.Kind = QueryParameterKind + return p +} +func (p *Parameter) beBody() *Parameter { + p.data.Kind = BodyParameterKind + return p +} + +func (p *Parameter) beHeader() *Parameter { + p.data.Kind = HeaderParameterKind + return p +} + +func (p *Parameter) beForm() *Parameter { + p.data.Kind = FormParameterKind + return p +} + +// Required sets the required field and returns the receiver +func (p *Parameter) Required(required bool) *Parameter { + p.data.Required = required + return p +} + +// AllowMultiple sets the allowMultiple field and returns the receiver +func (p *Parameter) AllowMultiple(multiple bool) *Parameter { + p.data.AllowMultiple = multiple + return p +} + +// AllowableValues sets the allowableValues field and returns the receiver +func (p *Parameter) AllowableValues(values map[string]string) *Parameter { + p.data.AllowableValues = values + return p +} + +// DataType sets the dataType field and returns the receiver +func (p *Parameter) DataType(typeName string) *Parameter { + p.data.DataType = typeName + return p +} + +// DataFormat sets the dataFormat field for Swagger UI +func (p *Parameter) DataFormat(formatName string) *Parameter { + p.data.DataFormat = formatName + return p +} + +// DefaultValue sets the default value field and returns the receiver +func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter { + p.data.DefaultValue = stringRepresentation + return p +} + +// Description sets the description value field and returns the receiver +func (p *Parameter) Description(doc string) *Parameter { + p.data.Description = doc + return p +} diff --git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/path_expression.go new file mode 100644 index 000000000..a921e6f22 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/path_expression.go @@ -0,0 +1,69 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "fmt" + "regexp" + "strings" +) + +// PathExpression holds a compiled path expression (RegExp) needed to match against +// Http request paths and to extract path parameter values. +type pathExpression struct { + LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) + VarCount int // the number of named parameters (enclosed by {}) in the path + Matcher *regexp.Regexp + Source string // Path as defined by the RouteBuilder + tokens []string +} + +// NewPathExpression creates a PathExpression from the input URL path. +// Returns an error if the path is invalid. 
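+// For example (illustrative only), the template "/users/{user-id}" compiles to the
+// expression ^/users/([^/]+?)(/.*)?$ containing one variable and the literal "users".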
+func newPathExpression(path string) (*pathExpression, error) { + expression, literalCount, varCount, tokens := templateToRegularExpression(path) + compiled, err := regexp.Compile(expression) + if err != nil { + return nil, err + } + return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 +func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) { + var buffer bytes.Buffer + buffer.WriteString("^") + //tokens = strings.Split(template, "/") + tokens = tokenizePath(template) + for _, each := range tokens { + if each == "" { + continue + } + buffer.WriteString("/") + if strings.HasPrefix(each, "{") { + // check for regular expression in variable + colon := strings.Index(each, ":") + if colon != -1 { + // extract expression + paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1]) + if paramExpr == "*" { // special case + buffer.WriteString("(.*)") + } else { + buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache + } + } else { + // plain var + buffer.WriteString("([^/]+?)") + } + varCount += 1 + } else { + literalCount += len(each) + encoded := each // TODO URI encode + buffer.WriteString(regexp.QuoteMeta(encoded)) + } + } + return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens +} diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go new file mode 100644 index 000000000..8c23af12c --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/request.go @@ -0,0 +1,113 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "compress/zlib" + "net/http" +) + +var defaultRequestContentType string + +// Request is a wrapper for a http Request that provides convenience methods +type Request struct { + Request *http.Request + pathParameters map[string]string + attributes map[string]interface{} // for storing request-scoped values + selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees +} + +func NewRequest(httpRequest *http.Request) *Request { + return &Request{ + Request: httpRequest, + pathParameters: map[string]string{}, + attributes: map[string]interface{}{}, + } // empty parameters, attributes +} + +// If ContentType is missing or */* is given then fall back to this type, otherwise +// a "Unable to unmarshal content of type:" response is returned. +// Valid values are restful.MIME_JSON and restful.MIME_XML +// Example: +// restful.DefaultRequestContentType(restful.MIME_JSON) +func DefaultRequestContentType(mime string) { + defaultRequestContentType = mime +} + +// PathParameter accesses the Path parameter value by its name +func (r *Request) PathParameter(name string) string { + return r.pathParameters[name] +} + +// PathParameters accesses the Path parameter values +func (r *Request) PathParameters() map[string]string { + return r.pathParameters +} + +// QueryParameter returns the (first) Query parameter value by its name +func (r *Request) QueryParameter(name string) string { + return r.Request.FormValue(name) +} + +// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. 
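+// Illustrative call (sketch; the form field name "name" is only an example):
+//
+//	value, err := request.BodyParameter("name")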
+func (r *Request) BodyParameter(name string) (string, error) { + err := r.Request.ParseForm() + if err != nil { + return "", err + } + return r.Request.PostFormValue(name), nil +} + +// HeaderParameter returns the HTTP Header value of a Header name or empty if missing +func (r *Request) HeaderParameter(name string) string { + return r.Request.Header.Get(name) +} + +// ReadEntity checks the Accept header and reads the content into the entityPointer. +func (r *Request) ReadEntity(entityPointer interface{}) (err error) { + contentType := r.Request.Header.Get(HEADER_ContentType) + contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding) + + // check if the request body needs decompression + if ENCODING_GZIP == contentEncoding { + gzipReader := currentCompressorProvider.AcquireGzipReader() + defer currentCompressorProvider.ReleaseGzipReader(gzipReader) + gzipReader.Reset(r.Request.Body) + r.Request.Body = gzipReader + } else if ENCODING_DEFLATE == contentEncoding { + zlibReader, err := zlib.NewReader(r.Request.Body) + if err != nil { + return err + } + r.Request.Body = zlibReader + } + + // lookup the EntityReader, use defaultRequestContentType if needed and provided + entityReader, ok := entityAccessRegistry.accessorAt(contentType) + if !ok { + if len(defaultRequestContentType) != 0 { + entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType) + } + if !ok { + return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) + } + } + return entityReader.Read(r, entityPointer) +} + +// SetAttribute adds or replaces the attribute with the given value. +func (r *Request) SetAttribute(name string, value interface{}) { + r.attributes[name] = value +} + +// Attribute returns the value associated to the given name. Returns nil if absent. +func (r Request) Attribute(name string) interface{} { + return r.attributes[name] +} + +// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees +func (r Request) SelectedRoutePath() string { + return r.selectedRoutePath +} diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go new file mode 100644 index 000000000..3b33ab220 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/response.go @@ -0,0 +1,236 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "errors" + "net/http" +) + +// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime) +var DefaultResponseMimeType string + +//PrettyPrintResponses controls the indentation feature of XML and JSON serialization +var PrettyPrintResponses = true + +// Response is a wrapper on the actual http ResponseWriter +// It provides several convenience methods to prepare and write response content. +type Response struct { + http.ResponseWriter + requestAccept string // mime-type what the Http Request says it wants to receive + routeProduces []string // mime-types what the Route says it can produce + statusCode int // HTTP status code that has been written explicity (if zero then net/http has written 200) + contentLength int // number of bytes written for the response body + prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. 
+ err error // err property is kept when WriteError is called +} + +// NewResponse creates a new response based on a http ResponseWriter. +func NewResponse(httpWriter http.ResponseWriter) *Response { + return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types +} + +// DefaultResponseContentType set a default. +// If Accept header matching fails, fall back to this type. +// Valid values are restful.MIME_JSON and restful.MIME_XML +// Example: +// restful.DefaultResponseContentType(restful.MIME_JSON) +func DefaultResponseContentType(mime string) { + DefaultResponseMimeType = mime +} + +// InternalServerError writes the StatusInternalServerError header. +// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason) +func (r Response) InternalServerError() Response { + r.WriteHeader(http.StatusInternalServerError) + return r +} + +// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. +func (r *Response) PrettyPrint(bePretty bool) { + r.prettyPrint = bePretty +} + +// AddHeader is a shortcut for .Header().Add(header,value) +func (r Response) AddHeader(header string, value string) Response { + r.Header().Add(header, value) + return r +} + +// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing. +func (r *Response) SetRequestAccepts(mime string) { + r.requestAccept = mime +} + +// EntityWriter returns the registered EntityWriter that the entity (requested resource) +// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say. +// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable. +func (r *Response) EntityWriter() (EntityReaderWriter, bool) { + sorted := sortedMimes(r.requestAccept) + for _, eachAccept := range sorted { + for _, eachProduce := range r.routeProduces { + if eachProduce == eachAccept.media { + if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok { + return w, true + } + } + } + if eachAccept.media == "*/*" { + for _, each := range r.routeProduces { + if w, ok := entityAccessRegistry.accessorAt(each); ok { + return w, true + } + } + } + } + // if requestAccept is empty + writer, ok := entityAccessRegistry.accessorAt(r.requestAccept) + if !ok { + // if not registered then fallback to the defaults (if set) + if DefaultResponseMimeType == MIME_JSON { + return entityAccessRegistry.accessorAt(MIME_JSON) + } + if DefaultResponseMimeType == MIME_XML { + return entityAccessRegistry.accessorAt(MIME_XML) + } + // Fallback to whatever the route says it can produce. + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + for _, each := range r.routeProduces { + if w, ok := entityAccessRegistry.accessorAt(each); ok { + return w, true + } + } + if trace { + traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept) + } + } + return writer, ok +} + +// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200) +func (r *Response) WriteEntity(value interface{}) error { + return r.WriteHeaderAndEntity(http.StatusOK, value) +} + +// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters. +// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces. 
+// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header. +// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead. +// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written. +// Current implementation ignores any q-parameters in the Accept Header. +// Returns an error if the value could not be written on the response. +func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error { + writer, ok := r.EntityWriter() + if !ok { + r.WriteHeader(http.StatusNotAcceptable) + return nil + } + return writer.Write(r, status, value) +} + +// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value) +// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteAsXml(value interface{}) error { + return writeXML(r, http.StatusOK, MIME_XML, value) +} + +// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value) +// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteHeaderAndXml(status int, value interface{}) error { + return writeXML(r, status, MIME_XML, value) +} + +// WriteAsJson is a convenience method for writing a value in json. +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteAsJson(value interface{}) error { + return writeJSON(r, http.StatusOK, MIME_JSON, value) +} + +// WriteJson is a convenience method for writing a value in Json with a given Content-Type. +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteJson(value interface{}, contentType string) error { + return writeJSON(r, http.StatusOK, contentType, value) +} + +// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type. +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error { + return writeJSON(r, status, contentType, value) +} + +// WriteError write the http status and the error string on the response. +func (r *Response) WriteError(httpStatus int, err error) error { + r.err = err + return r.WriteErrorString(httpStatus, err.Error()) +} + +// WriteServiceError is a convenience method for a responding with a status and a ServiceError +func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error { + r.err = err + return r.WriteHeaderAndEntity(httpStatus, err) +} + +// WriteErrorString is a convenience method for an error status with the actual error +func (r *Response) WriteErrorString(httpStatus int, errorReason string) error { + if r.err == nil { + // if not called from WriteError + r.err = errors.New(errorReason) + } + r.WriteHeader(httpStatus) + if _, err := r.Write([]byte(errorReason)); err != nil { + return err + } + return nil +} + +// Flush implements http.Flusher interface, which sends any buffered data to the client. 
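+// Illustrative note (sketch): a streaming RouteFunction may call resp.Flush() after
+// writing each chunk so that buffered data reaches the client immediately.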
+func (r *Response) Flush() { + if f, ok := r.ResponseWriter.(http.Flusher); ok { + f.Flush() + } else if trace { + traceLogger.Printf("ResponseWriter %v doesn't support Flush", r) + } +} + +// WriteHeader is overridden to remember the Status Code that has been written. +// Changes to the Header of the response have no effect after this. +func (r *Response) WriteHeader(httpStatus int) { + r.statusCode = httpStatus + r.ResponseWriter.WriteHeader(httpStatus) +} + +// StatusCode returns the code that has been written using WriteHeader. +func (r Response) StatusCode() int { + if 0 == r.statusCode { + // no status code has been written yet; assume OK + return http.StatusOK + } + return r.statusCode +} + +// Write writes the data to the connection as part of an HTTP reply. +// Write is part of http.ResponseWriter interface. +func (r *Response) Write(bytes []byte) (int, error) { + written, err := r.ResponseWriter.Write(bytes) + r.contentLength += written + return written, err +} + +// ContentLength returns the number of bytes written for the response content. +// Note that this value is only correct if all data is written through the Response using its Write* methods. +// Data written directly using the underlying http.ResponseWriter is not accounted for. +func (r Response) ContentLength() int { + return r.contentLength +} + +// CloseNotify is part of http.CloseNotifier interface +func (r Response) CloseNotify() <-chan bool { + return r.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +// Error returns the err created by WriteError +func (r Response) Error() error { + return r.err +} diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go new file mode 100644 index 000000000..3dd520eec --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/route.go @@ -0,0 +1,186 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "net/http" + "strings" +) + +// RouteFunction declares the signature of a function that can be bound to a Route. +type RouteFunction func(*Request, *Response) + +// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. +type Route struct { + Method string + Produces []string + Consumes []string + Path string // webservice root path + described path + Function RouteFunction + Filters []FilterFunction + + // cached values for dispatching + relativePath string + pathParts []string + pathExpr *pathExpression // cached compilation of relativePath as RegExp + + // documentation + Doc string + Notes string + Operation string + ParameterDocs []*Parameter + ResponseErrors map[int]ResponseError + ReadSample, WriteSample interface{} // structs that model an example request or response payload + + // Extra information used to store custom information about the route. 
+ Metadata map[string]interface{} +} + +// Initialize for Route +func (r *Route) postBuild() { + r.pathParts = tokenizePath(r.Path) +} + +// Create Request and Response from their http versions +func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { + params := r.extractParameters(httpRequest.URL.Path) + wrappedRequest := NewRequest(httpRequest) + wrappedRequest.pathParameters = params + wrappedRequest.selectedRoutePath = r.Path + wrappedResponse := NewResponse(httpWriter) + wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) + wrappedResponse.routeProduces = r.Produces + return wrappedRequest, wrappedResponse +} + +// dispatchWithFilters call the function after passing through its own filters +func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) { + if len(r.Filters) > 0 { + chain := FilterChain{Filters: r.Filters, Target: r.Function} + chain.ProcessFilter(wrappedRequest, wrappedResponse) + } else { + // unfiltered + r.Function(wrappedRequest, wrappedResponse) + } +} + +// Return whether the mimeType matches to what this Route can produce. +func (r Route) matchesAccept(mimeTypesWithQuality string) bool { + parts := strings.Split(mimeTypesWithQuality, ",") + for _, each := range parts { + var withoutQuality string + if strings.Contains(each, ";") { + withoutQuality = strings.Split(each, ";")[0] + } else { + withoutQuality = each + } + // trim before compare + withoutQuality = strings.Trim(withoutQuality, " ") + if withoutQuality == "*/*" { + return true + } + for _, producibleType := range r.Produces { + if producibleType == "*/*" || producibleType == withoutQuality { + return true + } + } + } + return false +} + +// Return whether this Route can consume content with a type specified by mimeTypes (can be empty). 
+func (r Route) matchesContentType(mimeTypes string) bool { + + if len(r.Consumes) == 0 { + // did not specify what it can consume ; any media type (“*/*”) is assumed + return true + } + + if len(mimeTypes) == 0 { + // idempotent methods with (most-likely or garanteed) empty content match missing Content-Type + m := r.Method + if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { + return true + } + // proceed with default + mimeTypes = MIME_OCTET + } + + parts := strings.Split(mimeTypes, ",") + for _, each := range parts { + var contentType string + if strings.Contains(each, ";") { + contentType = strings.Split(each, ";")[0] + } else { + contentType = each + } + // trim before compare + contentType = strings.Trim(contentType, " ") + for _, consumeableType := range r.Consumes { + if consumeableType == "*/*" || consumeableType == contentType { + return true + } + } + } + return false +} + +// Extract the parameters from the request url path +func (r Route) extractParameters(urlPath string) map[string]string { + urlParts := tokenizePath(urlPath) + pathParameters := map[string]string{} + for i, key := range r.pathParts { + var value string + if i >= len(urlParts) { + value = "" + } else { + value = urlParts[i] + } + if strings.HasPrefix(key, "{") { // path-parameter + if colon := strings.Index(key, ":"); colon != -1 { + // extract by regex + regPart := key[colon+1 : len(key)-1] + keyPart := key[1:colon] + if regPart == "*" { + pathParameters[keyPart] = untokenizePath(i, urlParts) + break + } else { + pathParameters[keyPart] = value + } + } else { + // without enclosing {} + pathParameters[key[1:len(key)-1]] = value + } + } + } + return pathParameters +} + +// Untokenize back into an URL path using the slash separator +func untokenizePath(offset int, parts []string) string { + var buffer bytes.Buffer + for p := offset; p < len(parts); p++ { + buffer.WriteString(parts[p]) + // do not end + if p < len(parts)-1 { + buffer.WriteString("/") + } + } + return buffer.String() +} + +// Tokenize an URL path using the slash separator ; the result does not have empty tokens +func tokenizePath(path string) []string { + if "/" == path { + return []string{} + } + return strings.Split(strings.Trim(path, "/"), "/") +} + +// for debugging +func (r Route) String() string { + return r.Method + " " + r.Path +} diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go new file mode 100644 index 000000000..5ad4a3a7c --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/route_builder.go @@ -0,0 +1,293 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync/atomic" + + "github.com/emicklei/go-restful/log" +) + +// RouteBuilder is a helper to construct Routes. +type RouteBuilder struct { + rootPath string + currentPath string + produces []string + consumes []string + httpMethod string // required + function RouteFunction // required + filters []FilterFunction + + typeNameHandleFunc TypeNameHandleFunction // required + + // documentation + doc string + notes string + operation string + readSample, writeSample interface{} + parameters []*Parameter + errorMap map[int]ResponseError + metadata map[string]interface{} +} + +// Do evaluates each argument with the RouteBuilder itself. 
+// This allows you to follow DRY principles without breaking the fluent programming style. +// Example: +// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) +// +// func Returns500(b *RouteBuilder) { +// b.Returns(500, "Internal Server Error", restful.ServiceError{}) +// } +func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { + for _, each := range oneArgBlocks { + each(b) + } + return b +} + +// To bind the route to a function. +// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required. +func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder { + b.function = function + return b +} + +// Method specifies what HTTP method to match. Required. +func (b *RouteBuilder) Method(method string) *RouteBuilder { + b.httpMethod = method + return b +} + +// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header. +func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder { + b.produces = mimeTypes + return b +} + +// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these +func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder { + b.consumes = mimeTypes + return b +} + +// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/". +func (b *RouteBuilder) Path(subPath string) *RouteBuilder { + b.currentPath = subPath + return b +} + +// Doc tells what this route is all about. Optional. +func (b *RouteBuilder) Doc(documentation string) *RouteBuilder { + b.doc = documentation + return b +} + +// A verbose explanation of the operation behavior. Optional. +func (b *RouteBuilder) Notes(notes string) *RouteBuilder { + b.notes = notes + return b +} + +// Reads tells what resource type will be read from the request payload. Optional. +// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. +func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder { + fn := b.typeNameHandleFunc + if fn == nil { + fn = reflectTypeName + } + typeAsName := fn(sample) + + b.readSample = sample + bodyParameter := &Parameter{&ParameterData{Name: "body"}} + bodyParameter.beBody() + bodyParameter.Required(true) + bodyParameter.DataType(typeAsName) + b.Param(bodyParameter) + return b +} + +// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not. +// Use this to modify or extend information for the Parameter (through its Data()). +func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { + for _, each := range b.parameters { + if each.Data().Name == name { + return each + } + } + return p +} + +// Writes tells what resource type will be written as the response payload. Optional. +func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { + b.writeSample = sample + return b +} + +// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates). +func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder { + if b.parameters == nil { + b.parameters = []*Parameter{} + } + b.parameters = append(b.parameters, parameter) + return b +} + +// Operation allows you to document what the actual method/function call is of the Route. +// Unless called, the operation name is derived from the RouteFunction set using To(..). 
+func (b *RouteBuilder) Operation(name string) *RouteBuilder { + b.operation = name + return b +} + +// ReturnsError is deprecated, use Returns instead. +func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder { + log.Print("ReturnsError is deprecated, use Returns instead.") + return b.Returns(code, message, model) +} + +// Returns allows you to document what responses (errors or regular) can be expected. +// The model parameter is optional ; either pass a struct instance or use nil if not applicable. +func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder { + err := ResponseError{ + Code: code, + Message: message, + Model: model, + IsDefault: false, + } + // lazy init because there is no NewRouteBuilder (yet) + if b.errorMap == nil { + b.errorMap = map[int]ResponseError{} + } + b.errorMap[code] = err + return b +} + +// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero. +func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder { + b.Returns(0, message, model) + // Modify the ResponseError just added/updated + re := b.errorMap[0] + // errorMap is initialized + b.errorMap[0] = ResponseError{ + Code: re.Code, + Message: re.Message, + Model: re.Model, + IsDefault: true, + } + return b +} + +// Metadata adds or updates a key=value pair to the metadata map. +func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { + if b.metadata == nil { + b.metadata = map[string]interface{}{} + } + b.metadata[key] = value + return b +} + +// ResponseError represents a response; not necessarily an error. +type ResponseError struct { + Code int + Message string + Model interface{} + IsDefault bool +} + +func (b *RouteBuilder) servicePath(path string) *RouteBuilder { + b.rootPath = path + return b +} + +// Filter appends a FilterFunction to the end of filters for this Route to build. +func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder { + b.filters = append(b.filters, filter) + return b +} + +// If no specific Route path then set to rootPath +// If no specific Produces then set to rootProduces +// If no specific Consumes then set to rootConsumes +func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) { + if len(b.produces) == 0 { + b.produces = rootProduces + } + if len(b.consumes) == 0 { + b.consumes = rootConsumes + } +} + +// typeNameHandler sets the function that will convert types to strings in the parameter +// and model definitions. 
+func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder { + b.typeNameHandleFunc = handler + return b +} + +// Build creates a new Route using the specification details collected by the RouteBuilder +func (b *RouteBuilder) Build() Route { + pathExpr, err := newPathExpression(b.currentPath) + if err != nil { + log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err) + os.Exit(1) + } + if b.function == nil { + log.Printf("[restful] No function specified for route:" + b.currentPath) + os.Exit(1) + } + operationName := b.operation + if len(operationName) == 0 && b.function != nil { + // extract from definition + operationName = nameOfFunction(b.function) + } + route := Route{ + Method: b.httpMethod, + Path: concatPath(b.rootPath, b.currentPath), + Produces: b.produces, + Consumes: b.consumes, + Function: b.function, + Filters: b.filters, + relativePath: b.currentPath, + pathExpr: pathExpr, + Doc: b.doc, + Notes: b.notes, + Operation: operationName, + ParameterDocs: b.parameters, + ResponseErrors: b.errorMap, + ReadSample: b.readSample, + WriteSample: b.writeSample, + Metadata: b.metadata} + route.postBuild() + return route +} + +func concatPath(path1, path2 string) string { + return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") +} + +var anonymousFuncCount int32 + +// nameOfFunction returns the short name of the function f for documentation. +// It uses a runtime feature for debugging ; its value may change for later Go versions. +func nameOfFunction(f interface{}) string { + fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer()) + tokenized := strings.Split(fun.Name(), ".") + last := tokenized[len(tokenized)-1] + last = strings.TrimSuffix(last, ")·fm") // < Go 1.5 + last = strings.TrimSuffix(last, ")-fm") // Go 1.5 + last = strings.TrimSuffix(last, "·fm") // < Go 1.5 + last = strings.TrimSuffix(last, "-fm") // Go 1.5 + if last == "func1" { // this could mean conflicts in API docs + val := atomic.AddInt32(&anonymousFuncCount, 1) + last = "func" + fmt.Sprintf("%d", val) + atomic.StoreInt32(&anonymousFuncCount, val) + } + return last +} diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/router.go new file mode 100644 index 000000000..9b32fb675 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/router.go @@ -0,0 +1,18 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import "net/http" + +// A RouteSelector finds the best matching Route given the input HTTP Request +type RouteSelector interface { + + // SelectRoute finds a Route given the input HTTP Request and a list of WebServices. + // It returns a selected Route and its containing WebService or an error indicating + // a problem. + SelectRoute( + webServices []*WebService, + httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) +} diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/service_error.go new file mode 100644 index 000000000..62d1108bb --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/service_error.go @@ -0,0 +1,23 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. 
+ +import "fmt" + +// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request. +type ServiceError struct { + Code int + Message string +} + +// NewError returns a ServiceError using the code and reason +func NewError(code int, message string) ServiceError { + return ServiceError{Code: code, Message: message} +} + +// Error returns a text representation of the service error +func (s ServiceError) Error() string { + return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message) +} diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go new file mode 100644 index 000000000..7af60233a --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/web_service.go @@ -0,0 +1,290 @@ +package restful + +import ( + "errors" + "os" + "reflect" + "sync" + + "github.com/emicklei/go-restful/log" +) + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// WebService holds a collection of Route values that bind a Http Method + URL Path to a function. +type WebService struct { + rootPath string + pathExpr *pathExpression // cached compilation of rootPath as RegExp + routes []Route + produces []string + consumes []string + pathParameters []*Parameter + filters []FilterFunction + documentation string + apiVersion string + + typeNameHandleFunc TypeNameHandleFunction + + dynamicRoutes bool + + // protects 'routes' if dynamic routes are enabled + routesLock sync.RWMutex +} + +func (w *WebService) SetDynamicRoutes(enable bool) { + w.dynamicRoutes = enable +} + +// TypeNameHandleFunction declares functions that can handle translating the name of a sample object +// into the restful documentation for the service. +type TypeNameHandleFunction func(sample interface{}) string + +// TypeNameHandler sets the function that will convert types to strings in the parameter +// and model definitions. If not set, the web service will invoke +// reflect.TypeOf(object).String(). +func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService { + w.typeNameHandleFunc = handler + return w +} + +// reflectTypeName is the default TypeNameHandleFunction and for a given object +// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via +// the reflection API. +func reflectTypeName(sample interface{}) string { + return reflect.TypeOf(sample).String() +} + +// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it. +func (w *WebService) compilePathExpression() { + compiled, err := newPathExpression(w.rootPath) + if err != nil { + log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err) + os.Exit(1) + } + w.pathExpr = compiled +} + +// ApiVersion sets the API version for documentation purposes. +func (w *WebService) ApiVersion(apiVersion string) *WebService { + w.apiVersion = apiVersion + return w +} + +// Version returns the API version for documentation purposes. +func (w *WebService) Version() string { return w.apiVersion } + +// Path specifies the root URL template path of the WebService. +// All Routes will be relative to this path. +func (w *WebService) Path(root string) *WebService { + w.rootPath = root + if len(w.rootPath) == 0 { + w.rootPath = "/" + } + w.compilePathExpression() + return w +} + +// Param adds a PathParameter to document parameters used in the root path. 
+func (w *WebService) Param(parameter *Parameter) *WebService { + if w.pathParameters == nil { + w.pathParameters = []*Parameter{} + } + w.pathParameters = append(w.pathParameters, parameter) + return w +} + +// PathParameter creates a new Parameter of kind Path for documentation purposes. +// It is initialized as required with string as its DataType. +func (w *WebService) PathParameter(name, description string) *Parameter { + return PathParameter(name, description) +} + +// PathParameter creates a new Parameter of kind Path for documentation purposes. +// It is initialized as required with string as its DataType. +func PathParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}} + p.bePath() + return p +} + +// QueryParameter creates a new Parameter of kind Query for documentation purposes. +// It is initialized as not required with string as its DataType. +func (w *WebService) QueryParameter(name, description string) *Parameter { + return QueryParameter(name, description) +} + +// QueryParameter creates a new Parameter of kind Query for documentation purposes. +// It is initialized as not required with string as its DataType. +func QueryParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p.beQuery() + return p +} + +// BodyParameter creates a new Parameter of kind Body for documentation purposes. +// It is initialized as required without a DataType. +func (w *WebService) BodyParameter(name, description string) *Parameter { + return BodyParameter(name, description) +} + +// BodyParameter creates a new Parameter of kind Body for documentation purposes. +// It is initialized as required without a DataType. +func BodyParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}} + p.beBody() + return p +} + +// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. +// It is initialized as not required with string as its DataType. +func (w *WebService) HeaderParameter(name, description string) *Parameter { + return HeaderParameter(name, description) +} + +// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. +// It is initialized as not required with string as its DataType. +func HeaderParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p.beHeader() + return p +} + +// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. +// It is initialized as required with string as its DataType. +func (w *WebService) FormParameter(name, description string) *Parameter { + return FormParameter(name, description) +} + +// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. +// It is initialized as required with string as its DataType. +func FormParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p.beForm() + return p +} + +// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes. 
+func (w *WebService) Route(builder *RouteBuilder) *WebService { + w.routesLock.Lock() + defer w.routesLock.Unlock() + builder.copyDefaults(w.produces, w.consumes) + w.routes = append(w.routes, builder.Build()) + return w +} + +// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' +func (w *WebService) RemoveRoute(path, method string) error { + if !w.dynamicRoutes { + return errors.New("dynamic routes are not enabled.") + } + w.routesLock.Lock() + defer w.routesLock.Unlock() + newRoutes := make([]Route, (len(w.routes) - 1)) + current := 0 + for ix := range w.routes { + if w.routes[ix].Method == method && w.routes[ix].Path == path { + continue + } + newRoutes[current] = w.routes[ix] + current = current + 1 + } + w.routes = newRoutes + return nil +} + +// Method creates a new RouteBuilder and initialize its http method +func (w *WebService) Method(httpMethod string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod) +} + +// Produces specifies that this WebService can produce one or more MIME types. +// Http requests must have one of these values set for the Accept header. +func (w *WebService) Produces(contentTypes ...string) *WebService { + w.produces = contentTypes + return w +} + +// Consumes specifies that this WebService can consume one or more MIME types. +// Http requests must have one of these values set for the Content-Type header. +func (w *WebService) Consumes(accepts ...string) *WebService { + w.consumes = accepts + return w +} + +// Routes returns the Routes associated with this WebService +func (w *WebService) Routes() []Route { + if !w.dynamicRoutes { + return w.routes + } + // Make a copy of the array to prevent concurrency problems + w.routesLock.RLock() + defer w.routesLock.RUnlock() + result := make([]Route, len(w.routes)) + for ix := range w.routes { + result[ix] = w.routes[ix] + } + return result +} + +// RootPath returns the RootPath associated with this WebService. Default "/" +func (w *WebService) RootPath() string { + return w.rootPath +} + +// PathParameters return the path parameter names for (shared amoung its Routes) +func (w *WebService) PathParameters() []*Parameter { + return w.pathParameters +} + +// Filter adds a filter function to the chain of filters applicable to all its Routes +func (w *WebService) Filter(filter FilterFunction) *WebService { + w.filters = append(w.filters, filter) + return w +} + +// Doc is used to set the documentation of this service. +func (w *WebService) Doc(plainText string) *WebService { + w.documentation = plainText + return w +} + +// Documentation returns it. 
+func (w *WebService) Documentation() string { + return w.documentation +} + +/* + Convenience methods +*/ + +// HEAD is a shortcut for .Method("HEAD").Path(subPath) +func (w *WebService) HEAD(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath) +} + +// GET is a shortcut for .Method("GET").Path(subPath) +func (w *WebService) GET(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath) +} + +// POST is a shortcut for .Method("POST").Path(subPath) +func (w *WebService) POST(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath) +} + +// PUT is a shortcut for .Method("PUT").Path(subPath) +func (w *WebService) PUT(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath) +} + +// PATCH is a shortcut for .Method("PATCH").Path(subPath) +func (w *WebService) PATCH(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath) +} + +// DELETE is a shortcut for .Method("DELETE").Path(subPath) +func (w *WebService) DELETE(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath) +} diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/web_service_container.go new file mode 100644 index 000000000..c9d31b06c --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/web_service_container.go @@ -0,0 +1,39 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "net/http" +) + +// DefaultContainer is a restful.Container that uses http.DefaultServeMux +var DefaultContainer *Container + +func init() { + DefaultContainer = NewContainer() + DefaultContainer.ServeMux = http.DefaultServeMux +} + +// If set the true then panics will not be caught to return HTTP 500. +// In that case, Route functions are responsible for handling any error situation. +// Default value is false = recover from panics. This has performance implications. +// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true) +var DoNotRecover = false + +// Add registers a new WebService add it to the DefaultContainer. +func Add(service *WebService) { + DefaultContainer.Add(service) +} + +// Filter appends a container FilterFunction from the DefaultContainer. +// These are called before dispatching a http.Request to a WebService. 
+func Filter(filter FilterFunction) { + DefaultContainer.Filter(filter) +} + +// RegisteredWebServices returns the collections of WebServices from the DefaultContainer +func RegisteredWebServices() []*WebService { + return DefaultContainer.RegisteredWebServices() +} diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 000000000..7805d36de --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
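For orientation, the go-restful sources vendored above define the API surface this dependency brings in: WebService, RouteBuilder, Route, Response, and the DefaultContainer helpers (Add, Filter, RegisteredWebServices). The sketch below (not itself part of the vendored patch) shows how a consumer might wire these pieces together; it uses only identifiers defined in the vendored sources plus the standard library, while the /greetings path, the hello handler, and the port are illustrative assumptions.

```go
package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

// hello reads the {name} path parameter and writes a JSON body using
// Request.PathParameter and Response.WriteAsJson from the vendored package.
func hello(req *restful.Request, resp *restful.Response) {
	name := req.PathParameter("name")
	if err := resp.WriteAsJson(map[string]string{"greeting": "hello " + name}); err != nil {
		log.Printf("write failed: %v", err)
	}
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/greetings").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)

	// GET returns a RouteBuilder; Route calls Build and appends the resulting Route.
	ws.Route(ws.GET("/{name}").To(hello).
		Doc("greet a person by name").
		Param(ws.PathParameter("name", "name of the person")).
		Writes(map[string]string{}))

	// Add registers the WebService with the DefaultContainer, which serves on http.DefaultServeMux.
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil)) // port is an illustrative assumption
}
```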
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 000000000..f8f7e3695
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,116 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+    exampleKey: !!binary gIGC
+
+GOOD:
+    exampleKey: gIGC
+... and decode the base64 data in your code.
+```
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+type Person struct {
+	Name string `json:"name"` // Affects YAML field names too.
+	Age  int    `json:"age"`
+}
+
+func main() {
+	// Marshal a Person struct to YAML.
+	p := Person{"John", 30}
+	y, err := yaml.Marshal(p)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	name: John
+	age: 30
+	*/
+
+	// Unmarshal the YAML back into a Person struct.
+	var p2 Person
+	err = yaml.Unmarshal(y, &p2)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(p2)
+	/* Output:
+	{John 30}
+	*/
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+func main() {
+	j := []byte(`{"name": "John", "age": 30}`)
+	y, err := yaml.JSONToYAML(j)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	name: John
+	age: 30
+	*/
+	j2, err := yaml.YAMLToJSON(y)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(j2))
+	/* Output:
+	{"age":30,"name":"John"}
+	*/
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 000000000..0bd3c2b46
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,497 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(json.Unmarshaler); ok {
+				return u, nil, reflect.Value{}
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, reflect.Value{}
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag       bool
+	index     []int
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. 
+ fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. 
It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 000000000..c02beacb9
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"gopkg.in/yaml.v2"
+)
+
+// Marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+	j, err := json.Marshal(o)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+	}
+
+	y, err := JSONToYAML(j)
+	if err != nil {
+		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+	}
+
+	return y, nil
+}
+
+// Converts YAML to JSON then uses JSON to unmarshal into an object.
+func Unmarshal(y []byte, o interface{}) error {
+	vo := reflect.ValueOf(o)
+	j, err := yamlToJSON(y, &vo)
+	if err != nil {
+		return fmt.Errorf("error converting YAML to JSON: %v", err)
+	}
+
+	err = json.Unmarshal(j, o)
+	if err != nil {
+		return fmt.Errorf("error unmarshaling JSON: %v", err)
+	}
+
+	return nil
+}
+
+// Convert JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+	// Convert the JSON to an object.
+	var jsonObj interface{}
+	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+	// Go JSON library doesn't try to pick the right number type (int, float,
+	// etc.) when unmarshaling to interface{}, it just picks float64
+	// universally. go-yaml does go through the effort of picking the right
+	// number type, so we can preserve number type throughout this process.
+	err := yaml.Unmarshal(j, &jsonObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// Marshal this object into YAML.
+	return yaml.Marshal(jsonObj)
+}
+
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+//   in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+//   use binary data with this library, encode the data as base64 as usual but do
+//   not use the !!binary tag in your YAML. This will ensure the original base64
+//   encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+	return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+	// Convert the YAML to an object.
+	var yamlObj interface{}
+	err := yaml.Unmarshal(y, &yamlObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// YAML objects are not completely compatible with JSON objects (e.g. you
+	// can have non-string keys in YAML). So, convert the YAML-compatible object
+	// to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert this object to JSON and return the data.
+ return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. + if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. + if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. + // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. + keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. + s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. 
+ jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
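The tail of the `ghodss/yaml` conversion code at the top of this hunk coerces YAML numbers and booleans into strings whenever the JSON target is a string field. A minimal, illustrative sketch of that user-visible behavior, assuming the vendored `github.com/ghodss/yaml` package; the `config` struct and its values are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// Both fields are strings, so the YAML scalars 1.2 and true should be
// coerced to "1.2" and "true" by the default branch shown above.
type config struct {
	Version string `json:"version"`
	Debug   string `json:"debug"`
}

func main() {
	data := []byte("version: 1.2\ndebug: true\n")

	var c config
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // expected: {Version:1.2 Debug:true}
}
```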
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md new file mode 100644 index 000000000..9c9b1fd48 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -0,0 +1,15 @@ +# gojsonpointer [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonpointer/status.svg)](https://ci.vmware.run/go-openapi/jsonpointer) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonpointer/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) +An implementation of JSON Pointer - Go language + +## Status +Completed YES + +Tested YES + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +### Note +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go new file mode 100644 index 000000000..39dd012c2 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -0,0 +1,238 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. 
+// +// created 25-02-2013 + +package jsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/swag" +) + +const ( + emptyPointer = `` + pointerSeparator = `/` + + invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator +) + +var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() + +// JSONPointable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONPointable interface { + JSONLookup(string) (interface{}, error) +} + +type implStruct struct { + mode string // "SET" or "GET" + + inDocument interface{} + + setInValue interface{} + + getOutNode interface{} + getOutKind reflect.Kind + outError error +} + +// New creates a new json pointer for the given string +func New(jsonPointerString string) (Pointer, error) { + + var p Pointer + err := p.parse(jsonPointerString) + return p, err + +} + +// Pointer the json pointer reprsentation +type Pointer struct { + referenceTokens []string +} + +// "Constructor", parses the given string JSON pointer +func (p *Pointer) parse(jsonPointerString string) error { + + var err error + + if jsonPointerString != emptyPointer { + if !strings.HasPrefix(jsonPointerString, pointerSeparator) { + err = errors.New(invalidStart) + } else { + referenceTokens := strings.Split(jsonPointerString, pointerSeparator) + for _, referenceToken := range referenceTokens[1:] { + p.referenceTokens = append(p.referenceTokens, referenceToken) + } + } + } + + return err +} + +// Get uses the pointer to retrieve a value from a JSON document +func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + return p.get(document, swag.DefaultJSONNameProvider) +} + +// GetForToken gets a value for a json pointer token 1 level deep +func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { + return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) +} + +func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + kind := reflect.Invalid + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind = rValue.Kind() + switch kind { + + case reflect.Struct: + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return nil, kind, err + } + return r, kind, nil + } + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return nil, kind, fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + return fld.Interface(), kind, nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + if mv.IsValid() && !swag.IsZero(mv) { + return mv.Interface(), kind, nil + } + return nil, kind, fmt.Errorf("object has no key %q", decodedToken) + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return nil, kind, err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + return elem.Interface(), kind, nil + + default: + return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + + if nameProvider == nil { + nameProvider = 
swag.DefaultJSONNameProvider + } + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + return node, kind, nil + } + + for _, token := range p.referenceTokens { + + decodedToken := Unescape(token) + + r, knd, err := getSingleImpl(node, decodedToken, nameProvider) + if err != nil { + return nil, knd, err + } + node, kind = r, knd + + } + + rValue := reflect.ValueOf(node) + kind = rValue.Kind() + + return node, kind, nil +} + +// DecodedTokens returns the decoded tokens +func (p *Pointer) DecodedTokens() []string { + result := make([]string, 0, len(p.referenceTokens)) + for _, t := range p.referenceTokens { + result = append(result, Unescape(t)) + } + return result +} + +// IsEmpty returns true if this is an empty json pointer +// this indicates that it points to the root document +func (p *Pointer) IsEmpty() bool { + return len(p.referenceTokens) == 0 +} + +// Pointer to string representation function +func (p *Pointer) String() string { + + if len(p.referenceTokens) == 0 { + return emptyPointer + } + + pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... and vice versa + +const ( + encRefTok0 = `~0` + encRefTok1 = `~1` + decRefTok0 = `~` + decRefTok1 = `/` +) + +// Unescape unescapes a json pointer reference token string to the original representation +func Unescape(token string) string { + step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) + step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + return step2 +} + +// Escape escapes a pointer reference token string +func Escape(token string) string { + step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) + step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + return step2 +} diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
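For reviewers unfamiliar with the newly vendored `go-openapi/jsonpointer` package above, a short usage sketch built only from the exported API in `pointer.go` (`New`, `Get`, `Escape`); the document and pointer values are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	doc := map[string]interface{}{
		"definitions": map[string]interface{}{
			"a/b": map[string]interface{}{"type": "string"},
		},
	}

	// A "/" inside a key has to be escaped as "~1", per the
	// encoding table at the bottom of pointer.go.
	ptr, err := jsonpointer.New("/definitions/" + jsonpointer.Escape("a/b") + "/type")
	if err != nil {
		panic(err)
	}

	v, kind, err := ptr.Get(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(v, kind) // expected: string string
}
```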
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md new file mode 100644 index 000000000..5f7881274 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -0,0 +1,15 @@ +# gojsonreference [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonreference/status.svg)](https://ci.vmware.run/go-openapi/jsonreference) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonreference/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) +An implementation of JSON Reference - Go language + +## Status +Work in progress ( 90% done ) + +## Dependencies +https://github.com/xeipuuv/gojsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go new file mode 100644 index 000000000..3bc0a6e26 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -0,0 +1,156 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. +// +// created 26-02-2013 + +package jsonreference + +import ( + "errors" + "net/url" + "strings" + + "github.com/PuerkitoBio/purell" + "github.com/go-openapi/jsonpointer" +) + +const ( + fragmentRune = `#` +) + +// New creates a new reference for the given string +func New(jsonReferenceString string) (Ref, error) { + + var r Ref + err := r.parse(jsonReferenceString) + return r, err + +} + +// MustCreateRef parses the ref string and panics when it's invalid. 
+// Use the New method for a version that returns an error +func MustCreateRef(ref string) Ref { + r, err := New(ref) + if err != nil { + panic(err) + } + return r +} + +// Ref represents a json reference object +type Ref struct { + referenceURL *url.URL + referencePointer jsonpointer.Pointer + + HasFullURL bool + HasURLPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +// GetURL gets the URL for this reference +func (r *Ref) GetURL() *url.URL { + return r.referenceURL +} + +// GetPointer gets the json pointer for this reference +func (r *Ref) GetPointer() *jsonpointer.Pointer { + return &r.referencePointer +} + +// String returns the best version of the url for this reference +func (r *Ref) String() string { + + if r.referenceURL != nil { + return r.referenceURL.String() + } + + if r.HasFragmentOnly { + return fragmentRune + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +// IsRoot returns true if this reference is a root document +func (r *Ref) IsRoot() bool { + return r.referenceURL != nil && + !r.IsCanonical() && + !r.HasURLPathOnly && + r.referenceURL.Fragment == "" +} + +// IsCanonical returns true when this pointer starts with http(s):// or file:// +func (r *Ref) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) +} + +// "Constructor", parses the given string JSON reference +func (r *Ref) parse(jsonReferenceString string) error { + + parsed, err := url.Parse(jsonReferenceString) + if err != nil { + return err + } + + r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) + refURL := r.referenceURL + + if refURL.Scheme != "" && refURL.Host != "" { + r.HasFullURL = true + } else { + if refURL.Path != "" { + r.HasURLPathOnly = true + } else if refURL.RawQuery == "" && refURL.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refURL.Scheme == "file" + r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") + + // invalid json-pointer error means url has no json-pointer fragment. simply ignore error + r.referencePointer, _ = jsonpointer.New(refURL.Fragment) + + return nil +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + childURL := child.GetURL() + parentURL := r.GetURL() + if childURL == nil { + return nil, errors.New("child url is nil") + } + if parentURL == nil { + return &child, nil + } + + ref, err := New(parentURL.ResolveReference(childURL).String()) + if err != nil { + return nil, err + } + return &ref, nil +} diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
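Similarly, a hedged sketch of how the vendored `go-openapi/jsonreference` package (`reference.go` above) is typically used; the URLs are invented, and the printed values are what the code above should produce for them:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonreference"
)

func main() {
	// A fragment-only reference: no URL part, just a JSON pointer.
	local := jsonreference.MustCreateRef("#/definitions/Pet")
	fmt.Println(local.HasFragmentOnly)              // expected: true
	fmt.Println(local.GetPointer().DecodedTokens()) // expected: [definitions Pet]

	// Resolve the child reference against a parent document URL.
	parent := jsonreference.MustCreateRef("http://example.com/api/swagger.json")
	resolved, err := parent.Inherits(local)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String()) // expected: http://example.com/api/swagger.json#/definitions/Pet
}
```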
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 000000000..4b2af124a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,5 @@ +# OAI object model [![Build Status](https://ci.vmware.run/api/badges/go-openapi/spec/status.svg)](https://ci.vmware.run/go-openapi/spec) [![Coverage](https://coverage.vmware.run/badges/go-openapi/spec/coverage.svg)](https://coverage.vmware.run/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents \ No newline at end of file diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go new file mode 100644 index 000000000..294cbccf7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -0,0 +1,274 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go-bindata. +// sources: +// schemas/jsonschema-draft-04.json +// schemas/v2/schema.json +// DO NOT EDIT! 
+ +package spec + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xcc\x57\x3b\x6f\xdb\x30\x10\xde\xfd\x2b\x04\xa5\x63\x52\xb9\x40\xa7\x6c\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x85\x0d\x43\xff\xbd\xa4\xa8\x07\x29\x91\x92\x2d\xbb\x48\xb4\xc4\xe1\xbd\xbe\x3b\xde\x8b\xe7\x45\x20\xbf\x10\xc7\xe1\x73\x10\x1e\x84\xc8\x9e\xa3\xe8\x35\x67\xf4\x29\xdf\x1d\x20\x45\x9f\x19\xdf\x47\x31\x47\x89\x78\x5a\x7e\x8d\xf4\xd9\x43\xf8\xa8\x85\x3e\xe9\xff\x67\x48\xc6\x90\xef\x38\xce\x04\x66\x54\x49\x7f\x67\x1c\x02\xcd\x12\xa4\x20\x50\xad\xa2\xe3\x4e\x30\xc5\x8a\x39\x97\xdc\x1a\x71\x45\xd0\x6c\xdf\x38\x47\x27\x8b\x50\x11\xc5\x29\x03\xa5\x1c\x55\xe4\x47\x9b\x98\x62\xba\x12\x90\x2a\x7d\x5f\x7a\x24\x5c\x9f\x9f\xa5\x83\x1c\x12\xa5\xe2\x21\x0c\xca\x96\xa9\xec\xf8\xc3\x8c\xe5\x12\xd7\x5f\x58\x51\x01\x7b\xe0\x7e\x10\xb8\x66\x18\xc2\xc0\x69\x91\x4a\x8e\xe5\x25\xfa\x7f\x40\x82\x0a\x22\x96\x43\x3b\x88\x90\xdf\x0a\xea\xda\x82\x1d\x19\x91\x8b\xfa\x58\xa5\x21\xc5\x1c\x6b\x9d\x0a\x42\x50\x06\x1b\x27\x8c\x1c\xa7\x19\x81\x3f\xd2\x97\x7c\x68\x1a\x68\xe5\xc0\xba\x8d\x74\x10\x6e\x19\x23\x80\xa8\xfa\xd9\x3a\x1e\x84\xb4\x20\x44\xff\x4d\xb7\xfa\x84\x6d\x5f\x61\x27\xd4\xaf\x5c\x70\x4c\xf7\xa1\xcf\x7e\x45\x9d\x73\xcf\xc6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x28\x7e\x2b\xa0\xa1\x0a\x5e\x40\x07\x73\x61\x80\x6d\x6d\x34\x8e\xe9\xd3\x8c\xb3\x0c\xb8\xc0\xbd\xe8\xe9\xa2\xf3\x78\x53\xa3\xec\x01\x49\x18\x4f\x91\xba\xab\xb0\xe0\x38\x74\xc6\xaa\x2b\xca\x7b\x6b\x16\x58\x10\x98\xd4\xeb\x14\xb5\xeb\x7d\x96\x82\x26\x4b\xcf\xe6\x71\x2a\xcf\xb0\x4c\xcd\x2a\xf7\x3d\x6a\x9b\x74\xf3\x56\x5e\x8f\x02\xc7\x1d\x29\x72\x59\x28\xbf\x5a\x16\xfb\xc6\x4d\xfb\xe8\x58\xb3\x8c\x1b\x77\x0a\x77\x86\xa6\xb4\xb4\xf5\x64\x93\xbb\xa0\x24\x88\xe4\x1e\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x74\xfc\x09\x74\x2f\x0e\xbd\x9e\x3b\xd5\xbc\x2c\x1f\xaf\xd6\xd0\xb6\x52\xbb\xdf\x22\x21\x80\x4f\xe7\xa8\xb7\x78\xb8\xd4\x7d\x74\x07\x13\xc5\x71\x05\x05\x91\xa6\x91\xf4\x7b\x38\x3d\xe9\x1e\x6e\x1d\xab\xef\x3c\x0c\x74\xbf\x7d\xd5\x6c\xce\x89\xa5\xbe\x8d\xf7\x66\xce\xee\xd1\x86\x67\x80\x34\xad\x8f\xc3\xb3\xae\xc6\x1c\xe3\xb7\xc2\x96\xd9\xb4\x72\x0c\xf0\xab\x92\xe9\x5a\x05\xee\x5c\xb2\x87\xc6\x7f\xa9\x9b\x17\x6b\xb0\xcc\x75\x77\x96\x16\xb7\xcf\x1c\xde\x0a\xcc\x21\x1e\x53\x64\x0e\x73\x4f\x81\xbc\xb8\x07\xa6\xe6\xfa\x50\x55\xe2\x5b\x4d\xad\x4b\xb6\xb6\x81\x49\x77\xc7\xca\x68\x1a\x90\x67\xd7\x78\x3f\x3c\xba\xa3\x8e\xdd\xe8\x7b\xc0\x8a\x21\x03\x1a\x03\xdd\xdd\x11\xd1\x20\xd3\x46\
x72\x55\x7d\x93\x0d\xb3\xcf\x34\x52\x46\x03\xd9\x8d\x75\xe2\x0e\x42\xbd\xb9\xdf\xe9\xdd\x34\xb6\x24\x9b\x5b\xa4\x56\x3f\x6b\xac\xd8\x01\x30\x1e\x25\xce\x3a\x77\xc6\x73\xd4\xbd\x96\xc9\xf5\x06\xbc\xca\xf8\x44\xb0\x2e\x09\x5a\xf3\xf5\x3a\x94\x7b\xb7\xa8\x9f\x7f\x17\x8e\x58\x53\xb2\x0e\xfc\xf5\x92\x8c\xc2\x4c\x49\xca\x84\xe7\x7d\x5d\xb6\x2f\x7e\x4f\x79\xba\x96\xe6\x75\xb7\x87\x9b\x0d\xdc\xb5\xbd\xae\xbb\x85\xb8\x8e\x64\x67\xd1\xe8\x18\xe5\xe2\x5f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return bindataRead( + _jsonschemaDraft04JSON, + "jsonschema-draft-04.json", + ) +} + +func jsonschemaDraft04JSON() (*asset, error) { + bytes, err := jsonschemaDraft04JSONBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1441640690, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\xdf\x73\xdc\xb6\xf1\x7f\xcf\x5f\x81\xb9\x78\x46\xf6\x24\xd6\x39\xfe\x7e\x5f\xea\x97\x8c\x1a\x39\x89\x5a\xbb\xd2\xf8\x9c\xf6\xc1\x95\x67\x70\x24\x4e\x87\x84\x3f\x2e\x04\x29\xe9\xea\xea\x7f\xef\x02\xfc\x71\x04\x01\x90\x20\x89\x3b\x9d\x6d\x7a\xa6\x8d\x8e\x04\x16\x8b\xc5\x62\xf7\xb3\x0b\x10\xf8\xf4\x0d\x42\xb3\x94\xa6\x01\x99\xbd\x42\xb3\x33\xf4\xb7\xc5\xe5\x3f\xd0\xc2\x5b\x93\x10\xa3\x55\x9c\xa0\xc5\x1d\xbe\xb9\x21\x09\x7a\x79\xfa\x02\x9d\x5d\x5d\x9c\xce\xbe\xe7\x15\xa8\xcf\x4b\xaf\xd3\x74\xf3\x6a\x3e\x67\x79\x91\x53\x1a\xcf\x6f\x5f\xce\x99\xa8\x7b\xfa\x3b\x8b\xa3\x6f\xf3\xc2\x4f\xf2\x47\xb5\x1a\xfc\xe5\xf3\xa2\x60\x9c\xdc\xcc\xfd\x04\xaf\xd2\xe7\x2f\xfe\xbf\xa8\x5c\xd4\x4b\xb7\x1b\xc1\x54\xbc\xfc\x9d\x78\x69\xfe\x2c\x21\x7f\x66\x34\x21\xbc\xf9\x0f\xf0\x1b\x9e\x14\xad\x8b\xd7\x9c\xb3\x68\x15\x97\x7f\x6f\x70\xba\x66\x33\xf8\xfb\x5a\xd4\xc5\xbe\x4f\x53\x1a\x47\x38\xb8\x4a\xe2\x0d\x49\x52\x4a\x18\xd0\x59\xe1\x80\x11\x51\x00\xca\xa7\x24\x89\xa4\xb7\x9f\x72\x52\x1f\xef\x9f\x57\x3f\x78\x97\x12\xb2\xe2\xac\x7d\x3b\xf7\xc9\x8a\x46\x82\x2c\x9b\xdf\x92\xc8\x8f\x93\xd7\xf7\x29\x89\x18\x3c\x98\x89\xd2\x0f\xf0\xff\x0f\x39\x79\x0d\xdd\x92\xfb\x1a\xed\xb2\xdb\x2c\x4d\x68\x74\x53\xf4\x05\x9e\x93\x28\x0b\xab\x6e\x8b\x27\x30\x26\xb3\xe2\xd7\x75\x55\xcc\x27\xcc\x4b\xe8\x86\x73\xc4\xa9\xbc\x5f\x93\x6a\x0c\x6f\x49\xc2\xf9\x42\xf1\x0a\xa5\x6b\xca\x90\x1f\x7b\x59\x48\xa2\xf4\xb4\xe0\xb4\x2e\xc2\xce\xce\x8a\x52\x52\xbd\x75\xcc\x52\x9b\x8e\x14\x62\xe6\xaf\x3e\x7e\xf8\xf8\xe9\x61\x8e\x5e\xfd\x1b\xfe\x5d\x7f\xf7\xf4\xc7\x57\xf0\x97\xff\xdd\xb3\x1f\x9f\xcc\xda\xfa\xc3\x1b\x42\x4f\x23\x1c\x12\x04\x1a\x4a\x37\xcf\xf2\x1e\x11\xa1\xa0\xe8\xf5\x3d\x0e\x37\x01\x79\x85\x4e\x76\x8a\x79\x22\x73\xba\xc4\x8c\x5c\x81\x72\xf4\xe5\x76\xde\xca\x16\xa7\x8a\xb8\xce\xa1\x34\xd6\xb1\x33\xc7\x1b\x7a\xd2\x90\xb5\x50\xf8\x9a\x42\x18\xc5\x5d\x14\x7c\x43\x41\xc6\x12\x05\x0f\xde\x66\x0d\x12\x0d\xe6\xce\x50\x00\xd5\xb8\x90\xde\x5e\xbc\x7d\x8d\x78\x4f\x19\xc2\x9e\x47\x36\x29\xf1\xd1\x72\x5b\x31\xbb\xeb\x9e\x9e\x89\x90\xf8\x14\xbf\x87\xea\x2a\x1b\xa0\xdc\x7e\xe6\xf5\x67\xa3\x68\x1a\x79\x38\x42\x05\x8d\x51\x6c\x88\x29\xdf\x29\xcd\xca\x32\xec\x6a\xd6\x5e\x77\xd7\xaf\x17\x6e\xb4\x9f\x80\x5a\x82\xc2\x58\x31\x51\x94\x3d\x37\x51\x4b\x08\xdb\xc0\x43\x1b\xfd\x28\x8b\x1a\x69\x31\xe2\x65\x09\x4d\xb7\x16\xaa\x56\x96\xd4\xd6\x3f\xef\x23\x27\x5d\x25\x89\x6a\x8a\x6f\x98\x6e\x16\xe2\x24\xc1\xdb\x9d\x1e\xd0\x94\x84\xf5\x72\xc6\x06\x81\x5e\x69\x12\x1f\xaa\xda\x59\x44\xff\xcc\xc8\x45\x41\x23\x4d\x32\x22\xf1\x40\xee\xf9\x04\xc7\xc1\x79\xe
c\x59\x74\x49\x2a\xdd\xb0\xf0\x3a\x1d\x52\xcc\xa9\xc6\xad\xe9\x66\xcb\x2f\x24\x22\x09\x0e\x10\xaf\x9e\x84\x98\x3f\x46\x78\x19\x67\xa9\x66\xb6\x2a\x5e\x51\x3c\x2d\xcc\x7d\x55\xac\x72\xf4\x8a\xcf\xe8\xf2\x8c\xe5\xd4\x32\x78\x47\xf1\x5a\xf6\x90\x2d\x02\xd4\x7a\xc9\x52\x8e\xf2\xc0\x69\x3c\x66\xad\x1b\x8d\xd6\x0c\x06\x5c\x27\xdb\x33\x94\xab\x04\xc2\x91\x0f\x56\x87\x78\x14\x2c\xb7\x20\x5a\xf7\x24\x35\xce\xbe\x57\xa5\x3a\xa6\x75\x06\x20\x27\x4a\xa9\x57\x79\x64\x70\xed\x4b\x70\xd0\x9d\x8d\xcb\x94\x86\x33\x10\xc4\x11\x07\x04\xb5\xe7\x92\x0b\x5d\xac\xe3\x2c\x00\xcf\x40\x90\x4f\x57\x2b\x92\x00\x46\x40\xab\x24\x0e\x45\x09\x21\xa7\x53\x84\x7e\xa1\xe9\xaf\xd9\x12\xfd\x1c\xe0\xdb\x18\x74\x0f\xbd\xc5\xc9\x1f\x7e\x7c\x17\x21\x40\x16\x38\x08\xe2\x3b\xe2\x1b\x7a\x01\x6a\x14\xb2\xcb\xd5\x82\x24\xb7\xd4\x1b\x33\x8e\xdc\xeb\x0a\x62\x9c\x7b\x96\x93\x13\xa8\xb5\x5d\x8a\xe0\x32\x53\xec\xa5\x76\xea\x5a\x16\xd6\x52\x0a\xa0\x41\x30\xba\x76\x94\xca\xc2\xaa\xc2\x37\x1d\x7a\x83\x3b\x5b\x93\xf1\x53\x5e\x53\x32\x19\xa5\x34\x60\x60\x40\xd7\x24\x0d\xeb\x39\xfd\x0d\x73\x91\xc3\xb0\x91\x43\x48\x7d\x50\x30\xba\xda\x42\x59\x94\xa3\xba\x9c\xcb\x42\x12\x08\xda\x85\x80\x61\x0e\x91\x02\x8e\xe8\x7f\x44\xbf\x0c\x23\x9b\x25\xc1\x48\x5e\x7e\x7b\xf7\x06\x6d\x62\x0a\xfc\x00\x33\x05\x8e\xf3\x54\xb9\x9e\xca\x84\xf2\xe7\x9c\x06\xb8\x3b\x3d\x6b\x30\xe5\xe9\x58\xe6\x04\x0d\x04\xc3\x05\xde\x9e\x59\x49\xc9\xc0\x65\xce\x4c\x9b\xe5\x3d\x98\xb1\x97\x74\x5f\x9d\x4f\x46\xdd\xd7\xfb\x3c\xa1\x8d\x03\xfd\xdb\xfe\x14\xbc\xae\xd4\x45\x17\x05\xfc\x3d\x45\x17\xe9\x09\x43\x24\xf2\xe2\x2c\xc1\x37\x60\x44\x41\xe3\x32\xc6\xfd\x12\xba\x5c\x00\x28\x8e\x43\x18\x08\xba\x0c\xaa\x6a\x07\xd5\xfb\xaa\x4d\x2b\x5d\x3f\x16\x1d\x52\x42\x00\x4b\xeb\xf9\x8e\x04\x20\xeb\xdb\x3c\x84\x63\xa5\x0c\x68\xe4\xd3\x5b\xea\x67\x80\xc4\x80\x0d\x21\x21\x76\x8a\x40\x62\x5b\x14\x66\x10\xcd\x80\x8f\x4c\xca\x8a\x45\x95\x93\x32\xbc\x3c\x39\x55\xc2\xc8\x3d\x0a\xa3\xa6\x0e\x10\xa8\x5a\x11\xe3\x3d\xe5\xb0\xb8\x6d\x14\xdb\xe6\x8e\x4d\x00\x65\x92\xbe\x81\x6e\x27\xc2\x2f\x92\x49\x0a\x9f\x8d\xd1\xbc\x8c\x44\x72\x20\x04\x68\x92\xe7\xb4\xf2\xf6\x59\x81\x79\x96\x42\xcd\x61\xb0\x72\x72\x0c\xc6\x91\x3f\x29\x82\x69\xbf\x00\x86\x22\x1c\x95\x23\xe4\x86\xaa\x69\x22\xb8\x3d\xf6\xbd\x6a\xaf\x7f\xf7\x13\x02\x38\x97\x81\x9f\x15\x8e\x81\x09\x5c\x50\x0b\x56\xa5\x6e\xe9\x62\xc9\x3d\xf6\xaa\x6c\x6e\xbf\x9d\x32\x45\x79\x3d\x7b\x23\xfb\x8c\x06\x83\x6a\xac\x56\xb6\x5a\xe5\xda\xc4\xcb\x2e\x2f\xc6\xcd\xb9\xe2\xc4\x4c\xfe\xc9\x3e\x26\x70\xe1\x3a\x8e\xdd\xfa\x93\x3c\xdd\x36\x66\x88\x95\x04\x41\x48\x43\xf2\x3e\xa7\xd1\x99\x2e\xd4\xb8\xd6\x2a\xdb\x55\x42\x80\x5f\xdf\xbf\xbf\x42\x21\x40\x38\x70\xf9\x0d\x8b\xc2\xd9\xc0\x8d\xa1\xec\x09\x81\x76\x49\xa3\x81\x38\xe8\x88\xe2\x7c\x39\x3b\x24\x09\x43\xce\x10\x89\x57\x6a\x96\x48\x37\x54\xb5\x97\x0f\x52\x75\x43\x9a\xa8\x51\x70\x06\x0e\x22\xc4\xc9\x76\x54\xfc\xbd\x4c\x28\x81\x88\x35\xa7\x54\xaa\x45\x35\xf6\x8f\x16\xfc\x57\x1c\x7c\x3f\x22\xba\x37\x18\x5a\xf1\xce\x36\xa5\xd6\xa4\x59\x31\x76\xe1\xbb\x48\xfb\x14\x01\x27\xdd\xa5\x5c\xba\x64\xaf\x49\x6f\x1b\x84\xdb\x33\xc5\xdd\x22\x16\x4d\x9a\xbb\xc9\x96\x26\xf9\x3f\x88\xad\x82\x8e\x2b\xb6\xb4\x59\xf0\x16\x92\xbb\xf2\x66\x9a\xba\x5c\x78\x0b\x49\xc5\x0a\x36\x26\xb1\xb2\xee\xd2\x42\x4b\x59\x7b\x69\x52\xf3\x39\x0e\xf1\x70\x4a\x8c\xda\xb9\x8c\xe3\x80\xe0\xa8\xa9\x9e\x2b\x9c\x05\xa9\x84\xa6\x15\x46\xd5\xb4\x7d\x1b\xa7\x52\xea\x5e\xd0\x32\xc6\x48\x02\xf8\xbb\x02\x42\x47\xe4\x34\x0a\xc2\xbd\x81\xd0\x0d\xb1\xcc\x08\xee\x7c\xb4\x5e\xf9\x33\x47\x74\xe4\xe5\xd4\xe1\x84\x7c\x12\xc0\xdc\x72\x42\x2a\xde\x34\xa3\x81\xe1\xb4\xd6\x04\x2b\xd3\x65\x98\xa0\x70\x
ea\xad\x1d\x51\x72\x64\xb7\xb4\x93\x4e\xbb\x9a\x67\x9d\x9c\xc8\xeb\x56\x61\x2c\x4f\x29\x31\x61\xbb\x09\x05\x4b\x9e\xf0\x44\x04\x8e\xb6\xe8\x16\x07\xd4\xcf\x11\x26\x83\x60\x23\x83\x32\xb1\x2f\xc2\xa6\x93\xc2\xdc\xd4\xb3\x12\x21\x95\xa7\xec\x0f\x6e\x67\xfd\xd3\x0f\x2f\x9e\xff\xe5\xfa\xd3\xff\x3d\x3c\x7b\xf2\xdf\x8f\x4f\x8b\xf6\x9f\x3d\xe9\x67\xc1\xff\x89\x83\x8c\x18\xf2\x1c\x7b\x30\x2b\x51\x9c\x36\x40\xa8\x7e\x84\x2c\x65\xd4\x29\x25\x6d\x37\xfa\x77\x64\xd7\x95\x2e\xf5\xcb\xe5\x59\x53\xc1\x38\x22\x97\x2b\x29\x86\xe8\x31\x3a\xda\x81\xb1\xa8\xcf\xb7\x00\xbd\x23\x62\x6d\xc9\xd3\x2c\x89\x5c\x6b\x59\x1f\x1e\x14\xd5\xa7\x53\xd9\xc4\xbe\x23\xeb\x6a\xdb\x93\x54\x53\x95\x76\x53\x62\x2d\x52\x93\x93\x5f\x4a\x93\x3d\x28\xad\x68\x40\x16\x3a\x6a\xb5\x5f\xd7\x46\xbb\x6d\x6d\x21\xcb\xc2\x86\x48\x41\x89\xd5\x5b\x48\x55\xa5\x5b\x26\xef\x91\x61\x15\x49\x89\x55\xb9\x39\xcf\xa4\xe5\x4d\xcc\x5a\x9a\x77\x06\xf8\xf4\xd3\x4c\x90\xb4\x9e\x5f\xa9\x9c\x53\x91\x98\xd2\x85\x73\xca\x0e\x38\xf1\x54\x53\x92\x9b\x71\xb1\xa2\xde\x7c\x4a\xa3\x94\xdc\xa8\x8f\x75\xe8\x1c\x95\x29\x86\xce\x09\x51\xa5\xc4\x7a\x5b\x08\x5d\xc2\xc2\x04\x35\x12\x1a\x52\xbe\xca\xc0\xf2\x04\x85\x96\x9e\x17\x07\x01\x0c\x25\x54\xf8\x59\xcb\x93\x69\x85\xbb\x51\xcb\x80\x22\xcb\x60\xc5\x82\x64\x59\x58\x4b\x29\xc4\xf7\x34\xcc\x42\x3b\x4a\x65\x61\x83\x01\xf1\x82\x8c\x81\x50\xde\xf6\x21\xa9\xd4\xd2\x73\x09\xe5\xed\xb9\x2c\x0a\x77\x70\xd9\x87\xa4\x52\xcb\x24\xcb\x37\x24\xba\x49\x2d\xf1\xef\xae\xb8\xa9\xcf\xbd\xa8\x55\xc5\x4d\xb8\xbc\xd8\x39\x69\xb7\x14\x25\x0a\x9b\x7a\x79\x61\x3f\x55\xaa\xd2\xa6\x3e\xf6\xa1\x55\x96\xd6\xd2\x92\x33\x86\x16\xe4\xea\x15\xf4\xba\x12\x59\xeb\x47\x64\xd4\x09\x98\x79\x14\x3c\xe5\xa5\x12\x06\x1b\xfa\xb8\x2b\x6f\x98\xf9\xfd\x61\x90\xe2\x99\x1f\xc9\xe9\x36\x8b\xb7\xec\x4e\x85\xe0\xa9\x70\x54\x5b\x1e\x3a\x25\x62\x25\xfc\x0e\x82\x2b\x74\xff\x9c\x67\x3d\x45\x64\xd5\xbd\x6b\x86\xe7\x8d\x35\x65\x8c\xbb\x0f\x97\xb1\xbf\xbd\xaa\xd6\xf5\xc6\x6d\x7c\xa8\xbb\x16\x69\xdf\x9f\x8c\x1b\xaf\x8f\x31\x6d\xe3\x2a\xbb\x9d\xa7\xd6\x35\xc9\xed\x2a\x58\xe7\xcb\xf7\x94\xc7\xc5\x7c\x8f\x9b\xd8\x3d\x43\x21\x8a\x2e\xd0\x25\x2f\x9d\xb1\x71\xfb\xdb\x1c\xef\x18\xd9\x31\x6e\x40\x11\x63\x04\x76\xce\x09\x83\x95\x2b\x12\xc2\x41\xec\x61\xbd\xd0\x6c\xa0\x18\xd7\xe5\x6e\xbc\x54\x53\xe0\x3e\xb9\x52\x13\xdb\x77\x6b\x22\x12\x20\x71\x82\x20\x76\xcf\xbf\x6c\xa8\xd8\xe6\x83\x55\xb6\xc7\x4b\xe4\x09\x2c\x1c\x9c\x0e\xc8\xc4\x6a\xc3\x39\xbb\x38\x6d\xc4\xb6\x8a\x1c\xb7\x57\x16\x62\x91\x2d\x17\x4d\x46\x8e\x2d\xec\xe9\x9c\xeb\x9f\xa9\x06\x1c\xcf\x44\x93\x03\x3d\xfe\x4f\x3f\xd5\x26\xa3\x3a\xd4\xa8\x1e\x3e\x36\x35\x04\xa1\x86\x90\x75\x8a\x4d\xa7\xd8\x74\x8a\x4d\x5b\x7b\x3d\xc5\xa6\x5f\x68\x6c\xfa\x4d\xfd\xbf\x25\x4e\x02\xde\x93\xed\x04\x93\x26\x98\x54\x7b\x2a\x74\x62\x42\x49\xfb\x43\x49\x82\x99\xd7\xe1\x26\xdd\x36\x57\x15\xa5\x96\x6d\x76\xbf\xb4\xb1\x25\x9a\x61\x88\xc1\x94\xe2\x49\x19\x5c\x53\xdb\xe5\xb6\x60\x38\x0a\xb6\x5c\x6f\x45\xc2\x86\xaf\x8a\x73\xa6\x78\xce\x26\x33\x7d\x33\x31\x21\xbc\xa3\x44\x78\xff\x82\x01\x7c\xcb\xad\xfe\x04\xf5\xd0\x04\xf5\x26\xa8\x37\x41\x3d\xd4\x84\x7a\xdc\xe4\x9d\xe3\x14\x4f\x68\x6f\x42\x7b\xb5\xa7\xa5\x5a\x4c\x80\x6f\x02\x7c\x3a\xde\x3f\x0f\xc0\xd7\x78\xc8\xf7\x69\x4d\x20\x10\x4d\x20\x70\x02\x81\x5d\xbd\x9e\x40\xe0\xd7\x04\x02\xf9\x27\x2c\x9f\x27\x00\x34\x7d\xb6\x59\x3c\x2d\x1e\x75\x6f\x9f\x1c\x04\x18\xb5\x4e\x4d\xfa\xd6\xb1\xd6\xb4\xa8\xe1\x1c\x62\x1e\x39\x8c\xe4\x8a\x35\x41\xc8\x69\x65\xb5\xfa\xf7\x75\x40\xae\x09\x69\xa1\x09\x69\x4d\x48\x6b\x42\x5a\xa8\x89\xb4\xa2\x38\xfa\xeb\x21\x36\xa9\xea\x3f\x1e\x19\xf4\x75\x9a\x71\xd3\x9c\x4e\x74\x16\xf4\x5a\x32\x8e\x03\
x29\x9a\x96\xab\x07\x92\x33\xa0\x61\x65\x64\xaf\x1b\x18\x5a\x33\xa4\x83\x04\x2e\xef\x62\x1e\xd8\x09\x45\xd1\x3a\xd8\x57\x76\x64\xda\x7e\x4d\x7b\x06\x80\x2b\x47\x8c\x94\xd5\x8f\xe1\x04\x04\x83\xa5\x13\xd0\x73\x3a\x07\x3d\xc6\x4b\x09\x17\x5c\x7f\xe7\x3e\x1c\xb8\x68\x8f\x5d\x2d\xfb\x67\x79\xb4\xfb\x7c\xd7\x9d\xb9\x74\x1a\xad\x35\xbc\x1e\xd1\xa0\xe6\x2b\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x66\x0f\x68\xb1\xeb\x73\x8e\x0e\x20\x36\xa4\x45\x17\x68\x6d\x40\xbb\x4e\x20\xdd\x90\xfe\xba\xc0\x7d\xa3\xfa\x3b\x0a\x1c\xda\xb6\x2c\xf9\x97\x98\x89\x38\xe4\xa2\x88\x99\x86\x01\x49\x07\x2d\x9f\xe7\xf3\xe9\xc5\x20\xf0\x39\x40\xe6\xa3\x10\xea\x3e\x25\xbd\xef\x86\xdb\x05\x6d\x81\x81\x07\x08\xbb\x13\x28\x83\xc0\xcd\xc7\x28\x1c\x42\xea\x07\x69\xbd\x5d\xf4\xa6\xb4\xdf\x18\x06\x72\xaf\x7f\x26\x67\x3e\x6c\x23\x94\x21\x96\xcc\x18\xc6\x74\x7e\xfa\xce\xcb\x44\xdb\xc3\x1e\xa1\xd0\xcc\xa8\xca\x95\x6b\xbf\x9a\x99\xd3\x0a\x4d\x3c\xe8\x01\x95\x26\x15\x36\x06\x4f\xd5\x02\x28\x8b\x94\xd3\xa3\x89\x51\xb7\xd0\x29\x1b\xb5\x1f\x94\x97\xfa\xb3\xfd\x7a\x32\x28\x0f\xd6\xa8\xa1\xc3\x41\xa0\xa2\xb6\x96\xb3\x09\x4d\x9d\x33\x1d\x5a\x68\xdb\x29\x3d\x77\x86\x98\xc1\x1c\x21\xe5\x7d\xea\x9e\x7b\x7d\x38\xeb\x27\x50\x9f\x72\x6c\x0e\x62\xc2\x69\x9c\x0c\x89\x4e\x12\x88\xf9\x2f\xa3\xc0\x78\x30\xe3\xe0\x23\xd8\xee\x43\xe5\x84\x54\xbd\x0c\x78\x41\x03\x26\x74\x7f\x24\x62\xb1\x02\x50\x17\xe8\x2e\x66\xb4\xfe\xba\xb0\x76\x0c\xcc\x17\x1e\x46\xbb\x38\xb0\x64\x0a\x9d\x8f\x26\x74\x7e\x14\x14\xe4\x66\xe9\xca\x6e\x4b\xcf\xde\xcc\xd9\xf1\x1a\xa3\xe6\x6a\x97\x85\x45\x1a\x75\xb3\xc3\x74\xfe\xd0\xb4\x12\xd9\x45\x69\x5a\x89\x9c\x56\x22\xa7\x95\xc8\xc7\x5b\x89\x7c\x04\xc8\x28\xf9\x24\xdd\xb5\x89\x63\x2f\x29\x2c\x69\xbe\xcb\x31\x0c\xbf\x16\x62\xa6\xf4\xb7\xe3\xd2\x42\x1d\x8d\xe1\xfe\x52\x75\x8a\x4a\x0c\xab\x77\x16\x56\x37\x4c\x98\xce\xd5\x97\x25\x2d\x87\x59\xfb\xf1\xf8\x16\x5b\x9d\xb4\x5d\x10\xa7\xed\x6e\x70\xe7\x65\x75\x7a\x3b\x04\x4a\x40\xef\x87\xd4\x04\x95\x4f\xe8\x32\x53\x0f\x6f\x1e\x0d\x02\xef\x12\xbc\xd9\xb8\x3a\xae\xfc\x58\xe6\x2a\xbf\xfc\xd3\x95\x06\xf5\xb9\x5e\xcc\xb5\xb6\x8d\x3c\x77\xd6\x19\xc0\x3f\x96\x71\xed\xb8\x7a\x76\xb8\xad\xd3\x9d\xc5\x6b\x95\xed\x5a\x62\x46\xbd\xb3\x2c\x5d\xf3\x7b\x24\xf2\xcd\xa6\x0b\xe5\xe8\xfd\x46\x0a\xcc\x8a\x30\xde\xd0\xbf\x93\xad\x1b\x5a\x31\x06\x06\x5f\x5e\x40\x60\x46\x3d\x9a\xba\xa4\x79\x85\x19\xbb\x8b\x13\xdf\x25\xcd\xb3\x0d\xe7\xd3\xa1\x28\x0b\xb2\x9e\x47\x18\xfb\x29\xf6\x89\x96\x6a\xf5\xf7\xb5\x56\xf3\xda\xc6\x79\xbf\x96\xe6\x31\x4e\xd2\x15\xbd\x75\xb9\xf5\xf9\xf8\x4c\x49\x63\x7e\x1d\x60\x0c\x1b\x28\xa2\xb1\xfd\xed\xc0\x23\x9c\x77\xbf\x7b\x88\x87\x7a\xae\x7e\xbb\xf8\x5b\xcf\x36\x6b\xe4\x42\x9c\x1f\xe5\x71\x7c\xba\x69\xb0\xd7\x87\xd5\xd1\x55\x10\xdf\x49\x77\x1c\x00\x4f\x71\x52\xdc\x27\xfb\x5b\x9f\x7b\xe9\xdc\x68\x6c\x2e\x14\x8b\x24\x18\xe7\x7b\x74\x6b\xb4\x10\x7e\x77\x7b\xcc\x83\x5e\xdb\x5e\x84\x22\xfa\xb0\xc8\x6b\x68\xa9\x29\x52\xee\xd1\x13\x8b\xbb\x87\x3f\xff\x59\xa1\x20\x8e\xc7\x9d\x15\x69\xfc\x07\xf9\xf2\x67\xc3\xa6\x10\xfa\xa1\x67\x43\x25\xdd\x69\x16\xc8\xb3\x40\x87\x91\xa7\x89\x50\xb4\xbc\xc7\x89\x80\x77\x72\x9f\xe6\xc2\xb1\xcc\x05\x35\xb0\x3b\x32\xa4\xf4\xf5\x4d\x93\x6a\x48\xbe\x30\xfc\x34\x4d\x42\xa4\x9f\x84\x8b\xe6\x28\x3a\x58\x78\x90\xbb\x2c\xb7\x2a\xdf\x3d\xea\x70\x49\xa6\xba\xe2\x59\x91\x6f\xc7\x3a\x4c\xe3\x0a\xc0\x6e\x96\x34\x5f\x4f\xef\x68\xa0\x88\x10\x9f\xf8\x28\x8d\xc5\xd9\x37\x08\x17\xf7\xf9\xe5\xf7\xb4\x06\x81\xf6\xfa\x89\x92\x37\xd9\x88\x69\xba\x3e\x38\xdd\xa9\xdc\x3b\x2f\x89\xc8\x9a\x8c\xe1\xce\x37\x6d\x1a\xce\x7a\x1d\xac\x76\x37\xeb\x20\xe1\xa7\x09\x8e\x18\xf0\xc4\x2f\xff\x48\x63\x2f\x0e\xca\xef\xd8
\xc5\x75\xff\x6d\xe2\x34\xce\x7e\x9d\x79\x14\x1b\x92\x64\x0b\xc1\x9f\x30\xf9\xd1\x9d\xf2\xbb\x66\xf0\x7a\xcb\xa6\x65\x3b\x86\x89\x79\x95\xf5\x99\xc7\x6e\xa5\xab\x44\xe4\x9f\xa9\xfc\x73\x43\x37\xba\x8b\xc7\x77\x4b\x47\x82\x5c\x2b\x97\xbb\xb3\x7f\x0e\xc5\x6e\xed\x41\x58\x3f\x74\xc8\x8e\xff\xe6\xd6\x3e\x47\xdb\xfa\x4a\xf2\x7a\x27\xe1\x74\x2b\xdf\xae\xa9\xe6\x16\x1b\x67\xdb\xf7\x2a\x03\xae\xdb\x1b\xe0\xf2\x6b\xb7\xaa\x21\x65\x47\x8e\xb3\x2f\xdc\xca\x26\x5a\x76\xff\xb8\xff\xaa\xad\xea\x97\xb2\x87\xc7\xd9\x97\x6c\x6a\xbf\x9c\xb6\xa5\xdf\x50\x54\x1b\x2f\x65\xeb\x8f\xfb\x2f\x7a\x6a\x52\xdc\x6b\x6b\xf2\x17\x3c\x3b\xac\xd0\xdc\x90\xe4\xec\xcb\xb4\x9a\x18\x95\xbd\x93\xfb\x94\xe2\x3e\x1b\xd3\x0b\x51\xbf\xe7\xc9\xe9\x57\x67\xd5\x44\x88\xdc\x29\x7f\xd4\x54\x78\x19\x0c\x59\x20\x68\x7d\x54\x2a\x78\x52\xfc\xd5\xa8\x4d\x32\xbd\x3e\x2c\x97\x61\xfa\x37\xfc\x7f\x0f\xff\x0b\x00\x00\xff\xff\x31\x8b\xeb\xb6\x54\x9c\x00\x00") + +func v2SchemaJSONBytes() ([]byte, error) { + return bindataRead( + _v2SchemaJSON, + "v2/schema.json", + ) +} + +func v2SchemaJSON() (*asset, error) { + bytes, err := v2SchemaJSONBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "v2/schema.json", size: 40020, mode: os.FileMode(420), modTime: time.Unix(1446147817, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "jsonschema-draft-04.json": jsonschemaDraft04JSON, + "v2/schema.json": v2SchemaJSON, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, + "v2": &bintree{nil, map[string]*bintree{ + "schema.json": &bintree{v2SchemaJSON, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 000000000..f285970aa --- /dev/null +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ContactInfo contact information for the exposed API. 
+// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 000000000..eb1490b05 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,626 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "net/url" + "reflect" + "strings" + "sync" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.Mutex + store map[string]interface{} +} + +var resCache = initResolutionCache() + +func initResolutionCache() ResolutionCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} + +func (s *simpleCache) Get(uri string) (interface{}, bool) { + s.lock.Lock() + v, ok := s.store[uri] + s.lock.Unlock() + return v, ok +} + +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +// ResolveRef resolves a reference against a context root +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + resolver, err := defaultSchemaLoader(root, nil, nil) + if err != nil { + return nil, err + } + + result := new(Schema) + if err := resolver.Resolve(ref, result); err != nil { + return nil, err + } + return result, nil +} + +// ResolveParameter resolves a paramter reference against a context root +func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { + resolver, err := defaultSchemaLoader(root, nil, nil) + if err != nil { + return nil, err + } + + result := new(Parameter) + if err := resolver.Resolve(&ref, result); err != nil { + return nil, err + } + return result, nil +} + +// ResolveResponse resolves response a reference against a context root +func ResolveResponse(root interface{}, ref Ref) (*Response, error) { + resolver, err := defaultSchemaLoader(root, nil, nil) + if err != nil { + return nil, err + } + + result := new(Response) + if err := resolver.Resolve(&ref, result); err != nil { + return nil, err + } + return result, nil +} + +type schemaLoader struct { + loadingRef *Ref + startingRef *Ref + currentRef *Ref + root interface{} + cache ResolutionCache + loadDoc func(string) (json.RawMessage, error) +} + +var idPtr, _ = jsonpointer.New("/id") +var schemaPtr, _ = jsonpointer.New("/$schema") +var refPtr, _ = jsonpointer.New("/$ref") + +func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) { + if cache == nil { + cache = resCache + } 
+ + var ptr *jsonpointer.Pointer + if ref != nil { + ptr = ref.GetPointer() + } + + currentRef := nextRef(root, ref, ptr) + + return &schemaLoader{ + root: root, + loadingRef: ref, + startingRef: ref, + cache: cache, + loadDoc: func(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil + }, + currentRef: currentRef, + }, nil +} + +func idFromNode(node interface{}) (*Ref, error) { + if idValue, _, err := idPtr.Get(node); err == nil { + if refStr, ok := idValue.(string); ok && refStr != "" { + idRef, err := NewRef(refStr) + if err != nil { + return nil, err + } + return &idRef, nil + } + } + return nil, nil +} + +func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { + if startingRef == nil { + return nil + } + if ptr == nil { + return startingRef + } + + ret := startingRef + var idRef *Ref + node := startingNode + + for _, tok := range ptr.DecodedTokens() { + node, _, _ = jsonpointer.GetForToken(node, tok) + if node == nil { + break + } + + idRef, _ = idFromNode(node) + if idRef != nil { + nw, err := ret.Inherits(*idRef) + if err != nil { + break + } + ret = nw + } + + refRef, _, _ := refPtr.Get(node) + if refRef != nil { + rf, _ := NewRef(refRef.(string)) + nw, err := ret.Inherits(rf) + if err != nil { + break + } + ret = nw + } + + } + return ret +} + +func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return fmt.Errorf("resolve ref: target needs to be a pointer") + } + + oldRef := currentRef + if currentRef != nil { + var err error + currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer())) + if err != nil { + return err + } + } + if currentRef == nil { + currentRef = ref + } + + refURL := currentRef.GetURL() + if refURL == nil { + return nil + } + if currentRef.IsRoot() { + nv := reflect.ValueOf(node) + reflect.Indirect(tgt).Set(reflect.Indirect(nv)) + return nil + } + + if strings.HasPrefix(refURL.String(), "#") { + res, _, err := ref.GetPointer().Get(node) + if err != nil { + res, _, err = ref.GetPointer().Get(r.root) + if err != nil { + return err + } + } + rv := reflect.Indirect(reflect.ValueOf(res)) + tgtType := reflect.Indirect(tgt).Type() + if rv.Type().AssignableTo(tgtType) { + reflect.Indirect(tgt).Set(reflect.Indirect(reflect.ValueOf(res))) + } else { + if err := swag.DynamicJSONToStruct(rv.Interface(), target); err != nil { + return err + } + } + + return nil + } + + if refURL.Scheme != "" && refURL.Host != "" { + // most definitely take the red pill + data, _, _, err := r.load(refURL) + if err != nil { + return err + } + + if ((oldRef == nil && currentRef != nil) || + (oldRef != nil && currentRef == nil) || + oldRef.String() != currentRef.String()) && + ((oldRef == nil && ref != nil) || + (oldRef != nil && ref == nil) || + (oldRef.String() != ref.String())) { + + return r.resolveRef(currentRef, ref, data, target) + } + + var res interface{} + if currentRef.String() != "" { + res, _, err = currentRef.GetPointer().Get(data) + if err != nil { + return err + } + } else { + res = data + } + + if err := swag.DynamicJSONToStruct(res, target); err != nil { + return err + } + + } + return nil +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + toFetch := *refURL + toFetch.Fragment = "" + + data, fromCache := r.cache.Get(toFetch.String()) + if !fromCache { + b, err := 
r.loadDoc(toFetch.String()) + if err != nil { + return nil, url.URL{}, false, err + } + + if err := json.Unmarshal(b, &data); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(toFetch.String(), data) + } + + return data, toFetch, fromCache, nil +} +func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error { + if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil { + return err + } + + return nil +} + +type specExpander struct { + spec *Swagger + resolver *schemaLoader +} + +// ExpandSpec expands the references in a swagger spec +func ExpandSpec(spec *Swagger) error { + resolver, err := defaultSchemaLoader(spec, nil, nil) + if err != nil { + return err + } + + for key, defintition := range spec.Definitions { + var def *Schema + var err error + if def, err = expandSchema(defintition, []string{"#/definitions/" + key}, resolver); err != nil { + return err + } + spec.Definitions[key] = *def + } + + for key, parameter := range spec.Parameters { + if err := expandParameter(¶meter, resolver); err != nil { + return err + } + spec.Parameters[key] = parameter + } + + for key, response := range spec.Responses { + if err := expandResponse(&response, resolver); err != nil { + return err + } + spec.Responses[key] = response + } + + if spec.Paths != nil { + for key, path := range spec.Paths.Paths { + if err := expandPathItem(&path, resolver); err != nil { + return err + } + spec.Paths.Paths[key] = path + } + } + + return nil +} + +// ExpandSchema expands the refs in the schema object +func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + + if schema == nil { + return nil + } + if root == nil { + root = schema + } + + nrr, _ := NewRef(schema.ID) + var rrr *Ref + if nrr.String() != "" { + switch root.(type) { + case *Schema: + rid, _ := NewRef(root.(*Schema).ID) + rrr, _ = rid.Inherits(nrr) + case *Swagger: + rid, _ := NewRef(root.(*Swagger).ID) + rrr, _ = rid.Inherits(nrr) + } + + } + + resolver, err := defaultSchemaLoader(root, rrr, cache) + if err != nil { + return err + } + + refs := []string{""} + if rrr != nil { + refs[0] = rrr.String() + } + var s *Schema + if s, err = expandSchema(*schema, refs, resolver); err != nil { + return nil + } + *schema = *s + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) { + if target.Items != nil { + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + } + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) { + defer func() { + schema = &target + }() + if target.Ref.String() == "" && target.Ref.IsRoot() { + target = *resolver.root.(*Schema) + return + } + + // t is the new expanded schema + var t *Schema + for target.Ref.String() != "" { + // var newTarget Schema + pRefs := strings.Join(parentRefs, ",") + pRefs += "," + if strings.Contains(pRefs, target.Ref.String()+",") { + err = nil + return + } + + if err = resolver.Resolve(&target.Ref, &t); err != nil { + return + } + parentRefs = append(parentRefs, target.Ref.String()) + target = *t + } + + if t, err = expandItems(target, parentRefs, resolver); err != nil { + return + } + target = *t + + for i := 
range target.AllOf { + if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil { + return + } + target.AllOf[i] = *t + } + for i := range target.AnyOf { + if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil { + return + } + target.AnyOf[i] = *t + } + for i := range target.OneOf { + if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil { + return + } + target.OneOf[i] = *t + } + if target.Not != nil { + if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil { + return + } + *target.Not = *t + } + for k, _ := range target.Properties { + if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil { + return + } + target.Properties[k] = *t + } + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil { + return + } + *target.AdditionalProperties.Schema = *t + } + for k, _ := range target.PatternProperties { + if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil { + return + } + target.PatternProperties[k] = *t + } + for k, _ := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil { + return + } + *target.Dependencies[k].Schema = *t + } + } + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil { + return + } + *target.AdditionalItems.Schema = *t + } + for k, _ := range target.Definitions { + if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil { + return + } + target.Definitions[k] = *t + } + return +} + +func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error { + if pathItem == nil { + return nil + } + if pathItem.Ref.String() != "" { + if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil { + return err + } + } + + for idx := range pathItem.Parameters { + if err := expandParameter(&(pathItem.Parameters[idx]), resolver); err != nil { + return err + } + } + if err := expandOperation(pathItem.Get, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Head, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Options, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Put, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Post, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Patch, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Delete, resolver); err != nil { + return err + } + return nil +} + +func expandOperation(op *Operation, resolver *schemaLoader) error { + if op == nil { + return nil + } + for i, param := range op.Parameters { + if err := expandParameter(¶m, resolver); err != nil { + return err + } + op.Parameters[i] = param + } + + if op.Responses != nil { + responses := op.Responses + if err := expandResponse(responses.Default, resolver); err != nil { + return err + } + for code, response := range responses.StatusCodeResponses { + if err := expandResponse(&response, resolver); err != nil { + return err + } + responses.StatusCodeResponses[code] = response + } + } + return nil +} + +func expandResponse(response *Response, resolver *schemaLoader) error { + if response == nil { + return nil + 
} + + if response.Ref.String() != "" { + if err := resolver.Resolve(&response.Ref, response); err != nil { + return err + } + } + + if response.Schema != nil { + parentRefs := []string{response.Schema.Ref.String()} + if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil { + return err + } + if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil { + return err + } else { + *response.Schema = *s + } + } + return nil +} + +func expandParameter(parameter *Parameter, resolver *schemaLoader) error { + if parameter == nil { + return nil + } + if parameter.Ref.String() != "" { + if err := resolver.Resolve(¶meter.Ref, parameter); err != nil { + return err + } + } + if parameter.Schema != nil { + parentRefs := []string{parameter.Schema.Ref.String()} + if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); err != nil { + return err + } + if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil { + return err + } else { + *parameter.Schema = *s + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 000000000..88add91b2 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 000000000..758b84531 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,165 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
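The expander.go hunk above wires `$ref` resolution and in-place expansion together through `ExpandSpec` and `ExpandSchema`. Below is a minimal usage sketch for expanding an internal `#/definitions/...` reference; it assumes `spec.Schema`, its `Properties`/`Definitions` fields, and its JSON (un)marshalling from `schema.go`, which is not part of this hunk.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A schema whose "address" property refers into its own definitions.
	raw := []byte(`{
		"type": "object",
		"properties": {"address": {"$ref": "#/definitions/address"}},
		"definitions": {
			"address": {"type": "object", "properties": {"street": {"type": "string"}}}
		}
	}`)

	var schema spec.Schema // assumed from schema.go, not shown in this hunk
	if err := json.Unmarshal(raw, &schema); err != nil {
		panic(err)
	}

	// Expand in place; the schema itself is the resolution root, and a nil
	// cache falls back to the package-level resCache defined above.
	if err := spec.ExpandSchema(&schema, &schema, nil); err != nil {
		panic(err)
	}

	// The $ref should now be inlined with the definition's content.
	out, _ := json.MarshalIndent(schema.Properties["address"], "", "  ")
	fmt.Println(string(out))
}
```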
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + HeaderProps +} + +// ResponseHeader creates a new header instance for use in a response +func ResponseHeader() *Header { + return new(Header) +} + +// WithDescription sets the description on this response, allows for chaining +func (h *Header) WithDescription(description string) *Header { + h.Description = description + return h +} + +// Typed a fluent builder method for the type of parameter +func (h *Header) Typed(tpe, format string) *Header { + h.Type = tpe + h.Format = format + return h +} + +// CollectionOf a fluent builder method for an array item +func (h *Header) CollectionOf(items *Items, format string) *Header { + h.Type = "array" + h.Items = items + h.CollectionFormat = format + return h +} + +// WithDefault sets the default value on this item +func (h *Header) WithDefault(defaultValue interface{}) *Header { + h.Default = defaultValue + return h +} + +// WithMaxLength sets a max length value +func (h *Header) WithMaxLength(max int64) *Header { + h.MaxLength = &max + return h +} + +// WithMinLength sets a min length value +func (h *Header) WithMinLength(min int64) *Header { + h.MinLength = &min + return h +} + +// WithPattern sets a pattern value +func (h *Header) WithPattern(pattern string) *Header { + h.Pattern = pattern + return h +} + +// WithMultipleOf sets a multiple of value +func (h *Header) WithMultipleOf(number float64) *Header { + h.MultipleOf = &number + return h +} + +// WithMaximum sets a maximum number value +func (h *Header) WithMaximum(max float64, exclusive bool) *Header { + h.Maximum = &max + h.ExclusiveMaximum = exclusive + return h +} + +// WithMinimum sets a minimum number value +func (h *Header) WithMinimum(min float64, exclusive bool) *Header { + h.Minimum = &min + h.ExclusiveMinimum = exclusive + return h +} + +// WithEnum sets a the enum values (replace) +func (h *Header) WithEnum(values ...interface{}) *Header { + h.Enum = append([]interface{}{}, values...) 
+ return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON marshal this from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.HeaderProps); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 000000000..fb8b7c4ac --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,168 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Extensions vendor specific extensions +type Extensions map[string]interface{} + +// Add adds a value to these extensions +func (e Extensions) Add(key string, value interface{}) { + realKey := strings.ToLower(key) + e[realKey] = value +} + +// GetString gets a string value from the extensions +func (e Extensions) GetString(key string) (string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(string) + return str, ok + } + return "", false +} + +// GetBool gets a string value from the extensions +func (e Extensions) GetBool(key string) (bool, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(bool) + return str, ok + } + return false, false +} + +// GetStringSlice gets a string value from the extensions +func (e Extensions) GetStringSlice(key string) ([]string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + arr, ok := v.([]interface{}) + if !ok { + return nil, false + } + var strs []string + for _, iface := range arr { + str, ok := iface.(string) + if !ok { + return nil, false + } + strs = append(strs, str) + } + return strs, ok + } + return nil, false +} + +// VendorExtensible composition block. 
+type VendorExtensible struct { + Extensions Extensions +} + +// AddExtension adds an extension to this extensible object +func (v *VendorExtensible) AddExtension(key string, value interface{}) { + if value == nil { + return + } + if v.Extensions == nil { + v.Extensions = make(map[string]interface{}) + } + v.Extensions.Add(key, value) +} + +// MarshalJSON marshals the extensions to json +func (v VendorExtensible) MarshalJSON() ([]byte, error) { + toser := make(map[string]interface{}) + for k, v := range v.Extensions { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + toser[k] = v + } + } + return json.Marshal(toser) +} + +// UnmarshalJSON for this extensible object +func (v *VendorExtensible) UnmarshalJSON(data []byte) error { + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if v.Extensions == nil { + v.Extensions = map[string]interface{}{} + } + v.Extensions[k] = vv + } + } + return nil +} + +// InfoProps the properties for an info definition +type InfoProps struct { + Description string `json:"description,omitempty"` + Title string `json:"title,omitempty"` + TermsOfService string `json:"termsOfService,omitempty"` + Contact *ContactInfo `json:"contact,omitempty"` + License *License `json:"license,omitempty"` + Version string `json:"version,omitempty"` +} + +// Info object provides metadata about the API. +// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. +// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (interface{}, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + if err := json.Unmarshal(data, &i.VendorExtensible); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 000000000..4d57ea5ca --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,199 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
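The info.go hunk above defines the `Extensions`/`VendorExtensible` plumbing embedded by most spec types: only keys prefixed with `x-` survive marshalling, and `swag.ConcatJSON` merges the extension map with the typed properties. A small sketch, hedged to use only the types and methods visible in this hunk:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	info := spec.Info{
		InfoProps: spec.InfoProps{
			Title:   "Example API", // illustrative values, not from this PR
			Version: "0.1.0",
		},
	}
	// AddExtension lower-cases the key; only "x-" keys are serialized.
	info.AddExtension("x-maintainer", "cri-containerd authors")

	out, err := json.Marshal(info) // InfoProps and the extensions are concatenated
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // roughly {"title":"Example API","version":"0.1.0","x-maintainer":"..."}
}
```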
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` +} + +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". +// +// For more information: http://goo.gl/8us55a#items-object- +type Items struct { + Refable + CommonValidations + SimpleSchema +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = "array" + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue interface{}) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(max int64) *Items { + i.MaxLength = &max + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(min int64) *Items { + i.MinLength = &min + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(max float64, exclusive bool) *Items { + i.Maximum = &max + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(min float64, exclusive bool) *Items { + i.Minimum = &min + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets a the enum values (replace) +func (i *Items) WithEnum(values ...interface{}) *Items { + i.Enum = append([]interface{}{}, values...) 
+ return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 000000000..f20961b4f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// License information for the exposed API. +// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 000000000..de1db6f02 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,233 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
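The items.go hunk above exposes a fluent builder around `CommonValidations` and `SimpleSchema`. A short sketch chaining those builders for a CSV array of bounded strings, using only functions that appear in this hunk:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A single string element with length bounds.
	elem := spec.NewItems().Typed("string", "").WithMinLength(1).WithMaxLength(64)

	// A CSV-formatted array of those elements, non-empty and unique.
	tags := spec.NewItems().CollectionOf(elem, "csv").WithMinItems(1).UniqueValues()

	out, err := json.Marshal(tags) // Refable, validations and simple schema are concatenated
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```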
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +} + +// SuccessResponse gets a success response model +func (o *Operation) SuccessResponse() (*Response, int, bool) { + if o.Responses == nil { + return nil, 0, false + } + + for k, v := range o.Responses.StatusCodeResponses { + if k/100 == 2 { + return &v, k, true + } + } + + return o.Responses.Default, 0, false +} + +// JSONLookup look up a value by the json property name +func (o Operation) JSONLookup(token string) (interface{}, error) { + if ex, ok := o.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(o.OperationProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + if err := json.Unmarshal(data, &o.VendorExtensible); err != nil { + return err + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// NewOperation creates a new operation instance. +// It expects an ID as parameter but not passing an ID is also valid. +func NewOperation(id string) *Operation { + op := new(Operation) + op.ID = id + return op +} + +// WithID sets the ID property on this operation, allows for chaining. +func (o *Operation) WithID(id string) *Operation { + o.ID = id + return o +} + +// WithDescription sets the description on this operation, allows for chaining +func (o *Operation) WithDescription(description string) *Operation { + o.Description = description + return o +} + +// WithSummary sets the summary on this operation, allows for chaining +func (o *Operation) WithSummary(summary string) *Operation { + o.Summary = summary + return o +} + +// WithExternalDocs sets/removes the external docs for/from this operation. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. 
+func (o *Operation) WithExternalDocs(description, url string) *Operation { + if description == "" && url == "" { + o.ExternalDocs = nil + return o + } + + if o.ExternalDocs == nil { + o.ExternalDocs = &ExternalDocumentation{} + } + o.ExternalDocs.Description = description + o.ExternalDocs.URL = url + return o +} + +// Deprecate marks the operation as deprecated +func (o *Operation) Deprecate() *Operation { + o.Deprecated = true + return o +} + +// Undeprecate marks the operation as not deprected +func (o *Operation) Undeprecate() *Operation { + o.Deprecated = false + return o +} + +// WithConsumes adds media types for incoming body values +func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { + o.Consumes = append(o.Consumes, mediaTypes...) + return o +} + +// WithProduces adds media types for outgoing body values +func (o *Operation) WithProduces(mediaTypes ...string) *Operation { + o.Produces = append(o.Produces, mediaTypes...) + return o +} + +// WithTags adds tags for this operation +func (o *Operation) WithTags(tags ...string) *Operation { + o.Tags = append(o.Tags, tags...) + return o +} + +// AddParam adds a parameter to this operation, when a parameter for that location +// and with that name already exists it will be replaced +func (o *Operation) AddParam(param *Parameter) *Operation { + if param == nil { + return o + } + + for i, p := range o.Parameters { + if p.Name == param.Name && p.In == param.In { + params := append(o.Parameters[:i], *param) + params = append(params, o.Parameters[i+1:]...) + o.Parameters = params + return o + } + } + + o.Parameters = append(o.Parameters, *param) + return o +} + +// RemoveParam removes a parameter from the operation +func (o *Operation) RemoveParam(name, in string) *Operation { + for i, p := range o.Parameters { + if p.Name == name && p.In == name { + o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) + return o + } + } + return o +} + +// SecuredWith adds a security scope to this operation. +func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { + o.Security = append(o.Security, map[string][]string{name: scopes}) + return o +} + +// WithDefaultResponse adds a default response to the operation. +// Passing a nil value will remove the response +func (o *Operation) WithDefaultResponse(response *Response) *Operation { + return o.RespondsWith(0, response) +} + +// RespondsWith adds a status code response to the operation. +// When the code is 0 the value of the response will be used as default response value. +// When the value of the response is nil it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 000000000..8fb66d12a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,299 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter, this is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter, this is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, SimpleSchema: SimpleSchema{Type: "object"}} +} + +// FormDataParam creates a body parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a body parameter +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, SimpleSchema: SimpleSchema{Type: "array", CollectionFormat: "csv", Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +} + +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` // when in == "body" + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` // when in == "query" || "formData" +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types. +// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, the path parameter is `itemId`. +// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// * Header - Custom headers that are expected as part of the request. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be *one* body parameter. The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. 
Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or `multipart/form-data` are used as the content type of the request (in Swagger's definition, the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form parameters have a different format based on the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4): +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is `submit-name`. This type of form parameters is more commonly used for file transfers. +// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = "array" + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false 
+ return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(max int64) *Parameter { + p.MaxLength = &max + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(min int64) *Parameter { + p.MinLength = &min + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { + p.Maximum = &max + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) WithMinimum(min float64, exclusive bool) *Parameter { + p.Minimum = &min + p.ExclusiveMinimum = exclusive + return p +} + +// WithEnum sets a the enum values (replace) +func (p *Parameter) WithEnum(values ...interface{}) *Parameter { + p.Enum = append([]interface{}{}, values...) + return p +} + +// WithMaxItems sets the max items +func (p *Parameter) WithMaxItems(size int64) *Parameter { + p.MaxItems = &size + return p +} + +// WithMinItems sets the min items +func (p *Parameter) WithMinItems(size int64) *Parameter { + p.MinItems = &size + return p +} + +// UniqueValues dictates that this array can only have unique items +func (p *Parameter) UniqueValues() *Parameter { + p.UniqueItems = true + return p +} + +// AllowDuplicates this array can have duplicates +func (p *Parameter) AllowDuplicates() *Parameter { + p.UniqueItems = false + return p +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + if err := json.Unmarshal(data, &p.ParamProps); err != nil { + return err + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2, b4, b5), nil +} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go new file mode 100644 index 000000000..9ab3ec538 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -0,0 +1,90 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// pathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. +// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + if err := json.Unmarshal(data, &p.PathItemProps); err != nil { + return err + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 000000000..9dc82a290 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,97 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
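
(Editorial aside, not part of the vendored sources.) The PathItem above merges its three embedded structs into one flat JSON object through the custom MarshalJSON, so a populated item serializes without nesting. A minimal, illustrative sketch of that behaviour, using only the fluent Parameter builders shown earlier in parameter.go:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Build a query parameter with the fluent builders from parameter.go.
	limit := (&spec.Parameter{}).
		Named("limit").
		WithLocation("query").
		Typed("integer", "int32").
		AsOptional()

	// Attach it to a path item; Get/Put/Post/... would normally point at
	// *spec.Operation values, which are defined in another file of this package.
	pi := spec.PathItem{}
	pi.Parameters = []spec.Parameter{*limit}

	// PathItem.MarshalJSON concatenates Refable, VendorExtensible and
	// PathItemProps into a single flat object.
	out, err := json.Marshal(pi)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```
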
+ +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (interface{}, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 000000000..68631df8b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,167 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
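
(Editorial aside, not part of the vendored sources.) The Paths container above uses a hand-rolled (un)marshaller: only keys beginning with "/" become path items, keys beginning with "x-" are collected as vendor extensions, and anything else is silently dropped. A small sketch of that behaviour:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	raw := []byte(`{
		"/pets": {"$ref": "#/x-shared/pets"},
		"x-generated-by": "example",
		"ignored": "keys that are neither paths nor x- extensions are dropped"
	}`)

	var paths spec.Paths
	if err := json.Unmarshal(raw, &paths); err != nil {
		panic(err)
	}

	fmt.Println(len(paths.Paths))                   // 1: only keys starting with "/"
	fmt.Println(paths.Extensions["x-generated-by"]) // example
	fmt.Println(paths.Paths["/pets"].Ref.String())  // #/x-shared/pets
}
```
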
+ +package spec + +import ( + "encoding/json" + "net/http" + "os" + "path/filepath" + + "github.com/go-openapi/jsonreference" +) + +// Refable is a struct for things that accept a $ref property +type Refable struct { + Ref Ref +} + +// MarshalJSON marshals the ref to json +func (r Refable) MarshalJSON() ([]byte, error) { + return r.Ref.MarshalJSON() +} + +// UnmarshalJSON unmarshalss the ref from json +func (r *Refable) UnmarshalJSON(d []byte) error { + return json.Unmarshal(d, &r.Ref) +} + +// Ref represents a json reference that is potentially resolved +type Ref struct { + jsonreference.Ref +} + +// RemoteURI gets the remote uri part of the ref +func (r *Ref) RemoteURI() string { + if r.String() == "" { + return r.String() + } + + u := *r.GetURL() + u.Fragment = "" + return u.String() +} + +// IsValidURI returns true when the url the ref points to can be found +func (r *Ref) IsValidURI() bool { + if r.String() == "" { + return true + } + + v := r.RemoteURI() + if v == "" { + return true + } + + if r.HasFullURL { + rr, err := http.Get(v) + if err != nil { + return false + } + + return rr.StatusCode/100 == 2 + } + + if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { + return false + } + + // check for local file + pth := v + if r.HasURLPathOnly { + p, e := filepath.Abs(pth) + if e != nil { + return false + } + pth = p + } + + fi, err := os.Stat(pth) + if err != nil { + return false + } + + return !fi.IsDir() +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + ref, err := r.Ref.Inherits(child.Ref) + if err != nil { + return nil, err + } + return &Ref{Ref: *ref}, nil +} + +// NewRef creates a new instance of a ref object +// returns an error when the reference uri is an invalid uri +func NewRef(refURI string) (Ref, error) { + ref, err := jsonreference.New(refURI) + if err != nil { + return Ref{}, err + } + return Ref{Ref: ref}, nil +} + +// MustCreateRef creates a ref object but +func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// // NewResolvedRef creates a resolved ref +// func NewResolvedRef(refURI string, data interface{}) Ref { +// return Ref{ +// Ref: jsonreference.MustCreateRef(refURI), +// Resolved: data, +// } +// } + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":"#"}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go new file mode 100644 index 000000000..308cc8478 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/response.go @@ -0,0 +1,113 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
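
(Editorial aside, not part of the vendored sources.) Refable and Ref above are what every `$ref`-bearing spec object embeds: a Ref marshals to `{"$ref": "..."}` and parses back through jsonreference. Note that IsValidURI performs a live HTTP GET for fully qualified URLs. A brief, illustrative sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ref, err := spec.NewRef("http://example.com/schemas/pet.json#/definitions/Pet")
	if err != nil {
		panic(err)
	}

	b, _ := json.Marshal(ref)
	fmt.Println(string(b))       // {"$ref":"http://example.com/schemas/pet.json#/definitions/Pet"}
	fmt.Println(ref.RemoteURI()) // http://example.com/schemas/pet.json (fragment stripped)

	// Parsing goes through the same {"$ref": ...} shape.
	var parsed spec.Ref
	_ = json.Unmarshal([]byte(`{"$ref":"#/definitions/Pet"}`), &parsed)
	fmt.Println(parsed.String()) // #/definitions/Pet
}
```
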
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// ResponseProps properties specific to a response +type ResponseProps struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + +// Response describes a single response from an API Operation. +// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} + +// WithDescription sets the description on this response, allows for chaining +func (r *Response) WithDescription(description string) *Response { + r.Description = description + return r +} + +// WithSchema sets the schema on this response, allows for chaining. +// Passing a nil argument removes the schema from this response +func (r *Response) WithSchema(schema *Schema) *Response { + r.Schema = schema + return r +} + +// AddHeader adds a header to this response +func (r *Response) AddHeader(name string, header *Header) *Response { + if header == nil { + return r.RemoveHeader(name) + } + if r.Headers == nil { + r.Headers = make(map[string]Header) + } + r.Headers[name] = *header + return r +} + +// RemoveHeader removes a header from this response +func (r *Response) RemoveHeader(name string) *Response { + delete(r.Headers, name) + return r +} + +// AddExample adds an example to this response +func (r *Response) AddExample(mediaType string, example interface{}) *Response { + if r.Examples == nil { + r.Examples = make(map[string]interface{}) + } + r.Examples[mediaType] = example + return r +} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go new file mode 100644 index 000000000..ea071ca63 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -0,0 +1,122 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
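
(Editorial aside, not part of the vendored sources.) The Response type above ships with fluent helpers (NewResponse, ResponseRef, WithDescription, WithSchema, AddHeader, AddExample) for assembling responses in code. A minimal sketch; RefSchema comes from schema.go further down in this change:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ok := spec.NewResponse().
		WithDescription("a single pet").
		WithSchema(spec.RefSchema("#/definitions/Pet")).
		AddExample("application/json", map[string]interface{}{"name": "rex"})

	b, err := json.Marshal(ok)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// A response that is only a reference to a shared definition.
	notFound := spec.ResponseRef("#/responses/NotFound")
	b, _ = json.Marshal(notFound)
	fmt.Println(string(b)) // {"$ref":"#/responses/NotFound"}
}
```
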
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "github.com/go-openapi/swag" +) + +// Responses is a container for the expected responses of an operation. +// The container maps a HTTP response code to the expected response. +// It is not expected from the documentation to necessarily cover all possible HTTP response codes, +// since they may not be known in advance. However, it is expected from the documentation to cover +// a successful operation response and any known errors. +// +// The `default` can be used a default response object for all HTTP codes that are not covered +// individually by the specification. +// +// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response +// for a successful operation call. +// +// For more information: http://goo.gl/8us55a#responsesObject +type Responses struct { + VendorExtensible + ResponsesProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (r Responses) JSONLookup(token string) (interface{}, error) { + if token == "default" { + return r.Default, nil + } + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if i, err := strconv.Atoi(token); err == nil { + if scr, ok := r.StatusCodeResponses[i]; ok { + return &scr, nil + } + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Responses) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { + r.ResponsesProps = ResponsesProps{} + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Responses) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]Response + if err := json.Unmarshal(data, &res); err != nil { + return nil + } + if v, ok := res["default"]; ok { + r.Default = &v + delete(res, "default") + } + for k, v := range res { + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = v + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go new file mode 100644 index 000000000..eb88f005c --- /dev/null +++ 
b/vendor/github.com/go-openapi/spec/schema.go @@ -0,0 +1,628 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func 
ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]interface{}{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := url.Parse(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// type ExtraSchemaProps map[string]interface{} + +// // JSONSchema represents a structure that is a json schema draft 04 +// type JSONSchema struct { +// SchemaProps +// ExtraSchemaProps +// } + +// // MarshalJSON marshal this to JSON +// func (s JSONSchema) MarshalJSON() ([]byte, error) { +// b1, err := json.Marshal(s.SchemaProps) +// if err != nil { +// return nil, err +// } +// b2, err := s.Ref.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b3, err := s.Schema.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b4, err := json.Marshal(s.ExtraSchemaProps) +// if err != nil { +// return nil, err +// } +// return swag.ConcatJSON(b1, b2, b3, b4), nil +// } + +// // UnmarshalJSON marshal this from JSON +// func (s *JSONSchema) UnmarshalJSON(data []byte) error { +// var sch JSONSchema +// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Ref); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Schema); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { +// return err +// } +// *s = sch +// return nil +// } + +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-,omitempty"` + Schema SchemaURL `json:"-,omitempty"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf 
[]Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. +// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + ExtraProps map[string]interface{} `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || err != nil { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(max int64) *Schema { + s.MaxProperties = &max + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(min int64) *Schema { + s.MinProperties = &min + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = 
format + } + return s +} + +// CollectionOf a fluent builder method for an array parameter +func (s *Schema) CollectionOf(items Schema) *Schema { + s.Type = []string{"array"} + s.Items = &SchemaOrArray{Schema: &items} + return s +} + +// WithDefault sets the default value on this parameter +func (s *Schema) WithDefault(defaultValue interface{}) *Schema { + s.Default = defaultValue + return s +} + +// WithRequired flags this parameter as required +func (s *Schema) WithRequired(items ...string) *Schema { + s.Required = items + return s +} + +// AddRequired adds field names to the required properties array +func (s *Schema) AddRequired(items ...string) *Schema { + s.Required = append(s.Required, items...) + return s +} + +// WithMaxLength sets a max length value +func (s *Schema) WithMaxLength(max int64) *Schema { + s.MaxLength = &max + return s +} + +// WithMinLength sets a min length value +func (s *Schema) WithMinLength(min int64) *Schema { + s.MinLength = &min + return s +} + +// WithPattern sets a pattern value +func (s *Schema) WithPattern(pattern string) *Schema { + s.Pattern = pattern + return s +} + +// WithMultipleOf sets a multiple of value +func (s *Schema) WithMultipleOf(number float64) *Schema { + s.MultipleOf = &number + return s +} + +// WithMaximum sets a maximum number value +func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { + s.Maximum = &max + s.ExclusiveMaximum = exclusive + return s +} + +// WithMinimum sets a minimum number value +func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { + s.Minimum = &min + s.ExclusiveMinimum = exclusive + return s +} + +// WithEnum sets a the enum values (replace) +func (s *Schema) WithEnum(values ...interface{}) *Schema { + s.Enum = append([]interface{}{}, values...) + return s +} + +// WithMaxItems sets the max items +func (s *Schema) WithMaxItems(size int64) *Schema { + s.MaxItems = &size + return s +} + +// WithMinItems sets the min items +func (s *Schema) WithMinItems(size int64) *Schema { + s.MinItems = &size + return s +} + +// UniqueValues dictates that this array can only have unique items +func (s *Schema) UniqueValues() *Schema { + s.UniqueItems = true + return s +} + +// AllowDuplicates this array can have duplicates +func (s *Schema) AllowDuplicates() *Schema { + s.UniqueItems = false + return s +} + +// AddToAllOf adds a schema to the allOf property +func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { + s.AllOf = append(s.AllOf, schemas...) + return s +} + +// WithDiscriminator sets the name of the discriminator field +func (s *Schema) WithDiscriminator(discriminator string) *Schema { + s.Discriminator = discriminator + return s +} + +// AsReadOnly flags this schema as readonly +func (s *Schema) AsReadOnly() *Schema { + s.ReadOnly = true + return s +} + +// AsWritable flags this schema as writeable (not read-only) +func (s *Schema) AsWritable() *Schema { + s.ReadOnly = false + return s +} + +// WithExample sets the example for this schema +func (s *Schema) WithExample(example interface{}) *Schema { + s.Example = example + return s +} + +// WithExternalDocs sets/removes the external docs for/from this schema. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. 
+func (s *Schema) WithExternalDocs(description, url string) *Schema { + if description == "" && url == "" { + s.ExternalDocs = nil + return s + } + + if s.ExternalDocs == nil { + s.ExternalDocs = &ExternalDocumentation{} + } + s.ExternalDocs.Description = description + s.ExternalDocs.URL = url + return s +} + +// WithXMLName sets the xml name for the object +func (s *Schema) WithXMLName(name string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Name = name + return s +} + +// WithXMLNamespace sets the xml namespace for the object +func (s *Schema) WithXMLNamespace(namespace string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Namespace = namespace + return s +} + +// WithXMLPrefix sets the xml prefix for the object +func (s *Schema) WithXMLPrefix(prefix string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Prefix = prefix + return s +} + +// AsXMLAttribute flags this object as xml attribute +func (s *Schema) AsXMLAttribute() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = true + return s +} + +// AsXMLElement flags this object as an xml node +func (s *Schema) AsXMLElement() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = false + return s +} + +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +func (s *Schema) AsWrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = true + return s +} + +// AsUnwrappedXML flags this object as an xml node +func (s *Schema) AsUnwrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = false + return s +} + +// MarshalJSON marshal this to JSON +func (s Schema) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SchemaProps) + if err != nil { + return nil, fmt.Errorf("schema props %v", err) + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, fmt.Errorf("vendor props %v", err) + } + b3, err := s.Ref.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("ref prop %v", err) + } + b4, err := s.Schema.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("schema prop %v", err) + } + b5, err := json.Marshal(s.SwaggerSchemaProps) + if err != nil { + return nil, fmt.Errorf("common validations %v", err) + } + var b6 []byte + if s.ExtraProps != nil { + jj, err := json.Marshal(s.ExtraProps) + if err != nil { + return nil, fmt.Errorf("extra props %v", err) + } + b6 = jj + } + return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *Schema) UnmarshalJSON(data []byte) error { + var sch Schema + if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sch.Ref); err != nil { + return err + } + if err := json.Unmarshal(data, &sch.Schema); err != nil { + return err + } + if err := json.Unmarshal(data, &sch.SwaggerSchemaProps); err != nil { + return err + } + + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + delete(d, "$ref") + delete(d, "$schema") + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(d, pn) + } + + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if sch.Extensions == nil { + sch.Extensions = map[string]interface{}{} + } + sch.Extensions[k] = vv + continue + } + if sch.ExtraProps == nil { + sch.ExtraProps = map[string]interface{}{} + } + sch.ExtraProps[k] = vv + } + + *s = sch + 
+ return nil +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 000000000..22d4f10af --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,142 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. 
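
(Editorial aside, not part of the vendored sources.) The constructors above (BasicAuth, APIKeyAuth, OAuth2Implicit, OAuth2Password, OAuth2Application, OAuth2AccessToken) cover the scheme types Swagger 2.0 supports, and SecurityDefinitions from swagger.go below maps scheme names to these values. A minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	defs := spec.SecurityDefinitions{
		"basicAuth": spec.BasicAuth(),
		"apiKey":    spec.APIKeyAuth("X-API-Key", "header"),
	}

	oauth := spec.OAuth2Implicit("https://example.com/oauth/authorize")
	oauth.AddScope("read:pets", "read your pets")
	defs["petstoreAuth"] = oauth

	b, err := json.Marshal(defs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```
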
+// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). +// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SecuritySchemeProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go new file mode 100644 index 000000000..cc2ae56b2 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -0,0 +1,79 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import "encoding/json" + +//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
+//go:generate perl -pi -e s,Json,JSON,g bindata.go + +const ( + // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs + SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" + // JSONSchemaURL the url for the json schema schema + JSONSchemaURL = "http://json-schema.org/draft-04/schema#" +) + +var ( + jsonSchema = MustLoadJSONSchemaDraft04() + swaggerSchema = MustLoadSwagger20Schema() +) + +// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error +func MustLoadJSONSchemaDraft04() *Schema { + d, e := JSONSchemaDraft04() + if e != nil { + panic(e) + } + return d +} + +// JSONSchemaDraft04 loads the json schema document for json shema draft04 +func JSONSchemaDraft04() (*Schema, error) { + b, err := Asset("jsonschema-draft-04.json") + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} + +// MustLoadSwagger20Schema panics when Swagger20Schema returns an error +func MustLoadSwagger20Schema() *Schema { + d, e := Swagger20Schema() + if e != nil { + panic(e) + } + return d +} + +// Swagger20Schema loads the swagger 2.0 schema from the embedded assets +func Swagger20Schema() (*Schema, error) { + + b, err := Asset("v2/schema.json") + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go new file mode 100644 index 000000000..ff3ef875e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -0,0 +1,317 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Swagger this is the root document object for the API specification. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) together into one document. 
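
(Editorial aside, not part of the vendored sources.) A minimal sketch of assembling and serializing a root Swagger document with the types from this package; Info and Operation details are omitted because those types live in other files of this vendored package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	doc := spec.Swagger{}
	doc.Swagger = "2.0"
	doc.BasePath = "/v1"
	doc.Paths = &spec.Paths{Paths: map[string]spec.PathItem{
		"/pets": {}, // operations omitted; see path_item.go above
	}}
	doc.Definitions = spec.Definitions{
		"Pet": *(&spec.Schema{}).
			Typed("object", "").
			SetProperty("name", *spec.StringProperty()).
			WithRequired("name"),
	}

	out, err := json.MarshalIndent(doc, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```
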
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` // must start with a leading "/" + Paths *Paths `json:"paths"` // required + Definitions Definitions `json:"definitions"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) >= 4 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, 
error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return nil, nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw SchemaOrStringArray + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch single.(type) { + case string: + *s = StringOrArray([]string{single.(string)}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 000000000..97f555840 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,73 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{description, name, externalDocs}} +} + +// Tag allows adding meta data to a single tag that is used by the [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
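
(Editorial aside, not part of the vendored sources.) NewTag fills TagProps positionally, and Tag's custom MarshalJSON folds TagProps and any vendor extensions into a single object. A short sketch; ExternalDocumentation is defined in another file of this package and is assumed here to carry Description and URL fields:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	tag := spec.NewTag("pets", "operations on pets", &spec.ExternalDocumentation{
		Description: "find out more",
		URL:         "https://example.com/docs/pets",
	})

	out, err := json.Marshal(tag)
	if err != nil {
		panic(err)
	}
	// Something like:
	// {"description":"operations on pets","name":"pets","externalDocs":{...}}
	fmt.Println(string(out))
}
```
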
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (t Tag) JSONLookup(token string) (interface{}, error) { + if ex, ok := t.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(t.TagProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go new file mode 100644 index 000000000..945a46703 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/xml_object.go @@ -0,0 +1,68 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// XMLObject a metadata object that allows for more fine-tuned XML model definitions. +// +// For more information: http://goo.gl/8us55a#xmlObject +type XMLObject struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Prefix string `json:"prefix,omitempty"` + Attribute bool `json:"attribute,omitempty"` + Wrapped bool `json:"wrapped,omitempty"` +} + +// WithName sets the xml name for the object +func (x *XMLObject) WithName(name string) *XMLObject { + x.Name = name + return x +} + +// WithNamespace sets the xml namespace for the object +func (x *XMLObject) WithNamespace(namespace string) *XMLObject { + x.Namespace = namespace + return x +} + +// WithPrefix sets the xml prefix for the object +func (x *XMLObject) WithPrefix(prefix string) *XMLObject { + x.Prefix = prefix + return x +} + +// AsAttribute flags this object as xml attribute +func (x *XMLObject) AsAttribute() *XMLObject { + x.Attribute = true + return x +} + +// AsElement flags this object as an xml node +func (x *XMLObject) AsElement() *XMLObject { + x.Attribute = false + return x +} + +// AsWrapped flags this object as wrapped, this is mostly useful for array types +func (x *XMLObject) AsWrapped() *XMLObject { + x.Wrapped = true + return x +} + +// AsUnwrapped flags this object as an xml node +func (x *XMLObject) AsUnwrapped() *XMLObject { + x.Wrapped = false + return x +} diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
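
For orientation on the go-openapi/spec types vendored above (the Tag marshalling in tags.go and the fluent builder in xml_object.go), here is a minimal usage sketch. It is not part of this diff; the field values are illustrative, and only the import path recorded in vendor.conf is assumed.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec" // vendored above
)

func main() {
	// Each With*/As* method on XMLObject mutates the receiver and returns
	// it, so the calls can be chained.
	x := new(spec.XMLObject).
		WithName("item").
		WithNamespace("http://example.com/schema").
		WithPrefix("ex").
		AsWrapped()

	b, err := json.Marshal(x)
	if err != nil {
		panic(err)
	}
	// {"name":"item","namespace":"http://example.com/schema","prefix":"ex","wrapped":true}
	fmt.Println(string(b))
}
```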
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md new file mode 100644 index 000000000..c1d3c196c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/README.md @@ -0,0 +1,12 @@ +# Swag [![Build Status](https://ci.vmware.run/api/badges/go-openapi/swag/status.svg)](https://ci.vmware.run/go-openapi/swag) [![Coverage](https://coverage.vmware.run/badges/go-openapi/swag/coverage.svg)](https://coverage.vmware.run/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) + +Contains a bunch of helper functions: + +* convert between value and pointers for builtins +* convert from string to builtin +* fast json concatenation +* search in path +* load from file or http +* name manglin \ No newline at end of file diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go new file mode 100644 index 000000000..28d912410 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/convert.go @@ -0,0 +1,188 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
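
The README above lists the helper families this package provides. As a hedged orientation sketch (inputs are illustrative, not taken from this diff), the string-to-builtin conversions defined in convert.go below are typically used like this:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	ok, _ := swag.ConvertBool("yes") // true: "yes" is in the evaluatesAsTrue table
	n, err := swag.ConvertInt32("123")
	if err != nil {
		panic(err)
	}
	f, _ := swag.ConvertFloat64("2.5")
	fmt.Println(ok, n, f)                            // true 123 2.5
	fmt.Println(swag.FormatFloat32(1.25))            // "1.25"
	fmt.Println(swag.IsFloat64AJSONInteger(1 << 20)) // true: within ±(2^53-1)
}
```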
+ +package swag + +import ( + "math" + "strconv" + "strings" +) + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 +) + +// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive +func IsFloat64AJSONInteger(f float64) bool { + if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { + return false + } + + return f == float64(int64(f)) || f == float64(uint64(f)) +} + +var evaluatesAsTrue = map[string]struct{}{ + "true": struct{}{}, + "1": struct{}{}, + "yes": struct{}{}, + "ok": struct{}{}, + "y": struct{}{}, + "on": struct{}{}, + "selected": struct{}{}, + "checked": struct{}{}, + "t": struct{}{}, + "enabled": struct{}{}, +} + +// ConvertBool turn a string into a boolean +func ConvertBool(str string) (bool, error) { + _, ok := evaluatesAsTrue[strings.ToLower(str)] + return ok, nil +} + +// ConvertFloat32 turn a string into a float32 +func ConvertFloat32(str string) (float32, error) { + f, err := strconv.ParseFloat(str, 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +// ConvertFloat64 turn a string into a float64 +func ConvertFloat64(str string) (float64, error) { + return strconv.ParseFloat(str, 64) +} + +// ConvertInt8 turn a string into int8 boolean +func ConvertInt8(str string) (int8, error) { + i, err := strconv.ParseInt(str, 10, 8) + if err != nil { + return 0, err + } + return int8(i), nil +} + +// ConvertInt16 turn a string into a int16 +func ConvertInt16(str string) (int16, error) { + i, err := strconv.ParseInt(str, 10, 16) + if err != nil { + return 0, err + } + return int16(i), nil +} + +// ConvertInt32 turn a string into a int32 +func ConvertInt32(str string) (int32, error) { + i, err := strconv.ParseInt(str, 10, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// ConvertInt64 turn a string into a int64 +func ConvertInt64(str string) (int64, error) { + return strconv.ParseInt(str, 10, 64) +} + +// ConvertUint8 turn a string into a uint8 +func ConvertUint8(str string) (uint8, error) { + i, err := strconv.ParseUint(str, 10, 8) + if err != nil { + return 0, err + } + return uint8(i), nil +} + +// ConvertUint16 turn a string into a uint16 +func ConvertUint16(str string) (uint16, error) { + i, err := strconv.ParseUint(str, 10, 16) + if err != nil { + return 0, err + } + return uint16(i), nil +} + +// ConvertUint32 turn a string into a uint32 +func ConvertUint32(str string) (uint32, error) { + i, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// ConvertUint64 turn a string into a uint64 +func ConvertUint64(str string) (uint64, error) { + return strconv.ParseUint(str, 10, 64) +} + +// FormatBool turns a boolean into a string +func FormatBool(value bool) string { + return strconv.FormatBool(value) +} + +// FormatFloat32 turns a float32 into a string +func FormatFloat32(value float32) string { + return strconv.FormatFloat(float64(value), 'f', -1, 32) +} + +// FormatFloat64 turns a float64 into a string +func FormatFloat64(value float64) string { + return strconv.FormatFloat(value, 'f', -1, 64) +} + +// FormatInt8 turns an int8 into a string +func FormatInt8(value int8) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt16 turns an int16 into a string +func FormatInt16(value int16) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt32 turns an int32 
into a string +func FormatInt32(value int32) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt64 turns an int64 into a string +func FormatInt64(value int64) string { + return strconv.FormatInt(value, 10) +} + +// FormatUint8 turns an uint8 into a string +func FormatUint8(value uint8) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint16 turns an uint16 into a string +func FormatUint16(value uint16) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint32 turns an uint32 into a string +func FormatUint32(value uint32) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint64 turns an uint64 into a string +func FormatUint64(value uint64) string { + return strconv.FormatUint(value, 10) +} diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go new file mode 100644 index 000000000..c95e4e78b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/convert_types.go @@ -0,0 +1,595 @@ +package swag + +import "time" + +// This file was taken from the aws go sdk + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. 
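
The pointer helpers in convert_types.go all follow the pattern just shown for strings: a value-to-pointer constructor, a nil-safe accessor, and slice/map variants. A brief sketch with illustrative values (not taken from this diff):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Value -> pointer, and nil-safe pointer -> value.
	name := swag.String("nginx")
	fmt.Println(swag.StringValue(name)) // "nginx"
	fmt.Println(swag.StringValue(nil))  // "" (nil pointers yield the zero value)

	// Slice and map variants copy element-wise.
	ptrs := swag.StringSlice([]string{"a", "b"})
	fmt.Println(swag.StringValueSlice(ptrs)) // [a b]
	m := swag.StringMap(map[string]string{"k": "v"})
	fmt.Println(swag.StringValueMap(m)) // map[k:v]
}
```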
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to of the int64 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int64 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint returns a pouinter to of the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pouinter passed in or +// 0 if the pouinter is nil. 
+func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values uinto a slice of +// uint pouinters +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pouinters uinto a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values uinto a string +// map of uint pouinters +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pouinters uinto a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pouinter to of the uint64 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint64 pouinter passed in or +// 0 if the pouinter is nil. +func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint64 values uinto a slice of +// uint32 pouinters +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pouinters uinto a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values uinto a string +// map of uint32 pouinters +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pouinters uinto a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pouinter to of the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pouinter passed in or +// 0 if the pouinter is nil. 
+func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values uinto a slice of +// uint64 pouinters +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pouinters uinto a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values uinto a string +// map of uint64 pouinters +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pouinters uinto a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. +func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. 
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go new file mode 100644 index 000000000..6e9ec20fc --- /dev/null +++ b/vendor/github.com/go-openapi/swag/json.go @@ -0,0 +1,270 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "bytes" + "encoding/json" + "reflect" + "strings" + "sync" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// DefaultJSONNameProvider the default cache for types +var DefaultJSONNameProvider = NewNameProvider() + +const comma = byte(',') + +var closers = map[byte]byte{ + '{': '}', + '[': ']', +} + +type ejMarshaler interface { + MarshalEasyJSON(w *jwriter.Writer) +} + +type ejUnmarshaler interface { + UnmarshalEasyJSON(w *jlexer.Lexer) +} + +// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaller +// so it takes the fastest option available. 
+func WriteJSON(data interface{}) ([]byte, error) { + if d, ok := data.(ejMarshaler); ok { + jw := new(jwriter.Writer) + d.MarshalEasyJSON(jw) + return jw.BuildBytes() + } + if d, ok := data.(json.Marshaler); ok { + return d.MarshalJSON() + } + return json.Marshal(data) +} + +// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller +// so it takes the fastes option available +func ReadJSON(data []byte, value interface{}) error { + if d, ok := value.(ejUnmarshaler); ok { + jl := &jlexer.Lexer{Data: data} + d.UnmarshalEasyJSON(jl) + return jl.Error() + } + if d, ok := value.(json.Unmarshaler); ok { + return d.UnmarshalJSON(data) + } + return json.Unmarshal(data, value) +} + +// DynamicJSONToStruct converts an untyped json structure into a struct +func DynamicJSONToStruct(data interface{}, target interface{}) error { + // TODO: convert straight to a json typed map (mergo + iterate?) + b, err := WriteJSON(data) + if err != nil { + return err + } + if err := ReadJSON(b, target); err != nil { + return err + } + return nil +} + +// ConcatJSON concatenates multiple json objects efficiently +func ConcatJSON(blobs ...[]byte) []byte { + if len(blobs) == 0 { + return nil + } + if len(blobs) == 1 { + return blobs[0] + } + + last := len(blobs) - 1 + var opening, closing byte + a := 0 + idx := 0 + buf := bytes.NewBuffer(nil) + + for i, b := range blobs { + if len(b) > 0 && opening == 0 { // is this an array or an object? + opening, closing = b[0], closers[b[0]] + } + + if opening != '{' && opening != '[' { + continue // don't know how to concatenate non container objects + } + + if len(b) < 3 { // yep empty but also the last one, so closing this thing + if i == last && a > 0 { + buf.WriteByte(closing) + } + continue + } + + idx = 0 + if a > 0 { // we need to join with a comma for everything beyond the first non-empty item + buf.WriteByte(comma) + idx = 1 // this is not the first or the last so we want to drop the leading bracket + } + + if i != last { // not the last one, strip brackets + buf.Write(b[idx : len(b)-1]) + } else { // last one, strip only the leading bracket + buf.Write(b[idx:]) + } + a++ + } + // somehow it ended up being empty, so provide a default value + if buf.Len() == 0 { + buf.WriteByte(opening) + buf.WriteByte(closing) + } + return buf.Bytes() +} + +// ToDynamicJSON turns an object into a properly JSON typed structure +func ToDynamicJSON(data interface{}) interface{} { + // TODO: convert straight to a json typed map (mergo + iterate?) + b, _ := json.Marshal(data) + var res interface{} + json.Unmarshal(b, &res) + return res +} + +// FromDynamicJSON turns an object into a properly JSON typed structure +func FromDynamicJSON(data, target interface{}) error { + b, _ := json.Marshal(data) + return json.Unmarshal(b, target) +} + +// NameProvider represents an object capabale of translating from go property names +// to json property names +// This type is thread-safe. 
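
ConcatJSON above is the fast path used elsewhere in this vendor tree (for example by spec.Tag.MarshalJSON earlier in this diff). A small sketch of its concatenation behaviour, with illustrative inputs:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Two objects are merged by dropping the closing brace of the first and
	// the opening brace of the second, joined with a comma.
	merged := swag.ConcatJSON([]byte(`{"a":1}`), []byte(`{"b":2}`))
	fmt.Println(string(merged)) // {"a":1,"b":2}

	// Empty objects are skipped rather than producing a dangling comma.
	fmt.Println(string(swag.ConcatJSON([]byte(`{}`), []byte(`{"c":3}`)))) // {"c":3}
}
```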
+type NameProvider struct { + lock *sync.Mutex + index map[reflect.Type]nameIndex +} + +type nameIndex struct { + jsonNames map[string]string + goNames map[string]string +} + +// NewNameProvider creates a new name provider +func NewNameProvider() *NameProvider { + return &NameProvider{ + lock: &sync.Mutex{}, + index: make(map[reflect.Type]nameIndex), + } +} + +func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { + for i := 0; i < tpe.NumField(); i++ { + targetDes := tpe.Field(i) + + if targetDes.PkgPath != "" { // unexported + continue + } + + if targetDes.Anonymous { // walk embedded structures tree down first + buildnameIndex(targetDes.Type, idx, reverseIdx) + continue + } + + if tag := targetDes.Tag.Get("json"); tag != "" { + + parts := strings.Split(tag, ",") + if len(parts) == 0 { + continue + } + + nm := parts[0] + if nm == "-" { + continue + } + if nm == "" { // empty string means we want to use the Go name + nm = targetDes.Name + } + + idx[nm] = targetDes.Name + reverseIdx[targetDes.Name] = nm + } + } +} + +func newNameIndex(tpe reflect.Type) nameIndex { + var idx = make(map[string]string, tpe.NumField()) + var reverseIdx = make(map[string]string, tpe.NumField()) + + buildnameIndex(tpe, idx, reverseIdx) + return nameIndex{jsonNames: idx, goNames: reverseIdx} +} + +// GetJSONNames gets all the json property names for a type +func (n *NameProvider) GetJSONNames(subject interface{}) []string { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + + var res []string + for k := range names.jsonNames { + res = append(res, k) + } + return res +} + +// GetJSONName gets the json name for a go property name +func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetJSONNameForType(tpe, name) +} + +// GetJSONNameForType gets the json name for a go property name on a given type +func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.goNames[name] + return nme, ok +} + +func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { + n.lock.Lock() + defer n.lock.Unlock() + names := newNameIndex(tpe) + n.index[tpe] = names + return names +} + +// GetGoName gets the go name for a json property name +func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetGoNameForType(tpe, name) +} + +// GetGoNameForType gets the go name for a given type for a json property name +func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.jsonNames[name] + return nme, ok +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go new file mode 100644 index 000000000..6dbc31330 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "fmt" + "io/ioutil" + "net/http" + "strings" +) + +// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in +func LoadFromFileOrHTTP(path string) ([]byte, error) { + return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes)(path) +} + +// LoadStrategy returns a loader function for a given path or uri +func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(path, "http") { + return remote + } + return local +} + +func loadHTTPBytes(path string) ([]byte, error) { + resp, err := http.Get(path) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) + } + + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go new file mode 100644 index 000000000..8323fa37b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/net.go @@ -0,0 +1,24 @@ +package swag + +import ( + "net" + "strconv" +) + +// SplitHostPort splits a network address into a host and a port. +// The port is -1 when there is no port to be found +func SplitHostPort(addr string) (host string, port int, err error) { + h, p, err := net.SplitHostPort(addr) + if err != nil { + return "", -1, err + } + if p == "" { + return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} + } + + pi, err := strconv.Atoi(p) + if err != nil { + return "", -1, err + } + return h, pi, nil +} diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go new file mode 100644 index 000000000..273e9fbed --- /dev/null +++ b/vendor/github.com/go-openapi/swag/path.go @@ -0,0 +1,56 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
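
A hedged sketch of the loaders and the address splitter vendored just above (loading.go and net.go); the file path and address are illustrative assumptions:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/go-openapi/swag"
)

func main() {
	// LoadStrategy picks the remote loader for paths starting with "http"
	// and the local one otherwise; LoadFromFileOrHTTP wires this to
	// ioutil.ReadFile and an HTTP GET.
	load := swag.LoadStrategy("./swagger.json", ioutil.ReadFile, func(p string) ([]byte, error) {
		return nil, fmt.Errorf("remote loading disabled for %q", p)
	})
	if b, err := load("./swagger.json"); err == nil {
		fmt.Println(len(b), "bytes loaded")
	}

	host, port, err := swag.SplitHostPort("localhost:8080")
	fmt.Println(host, port, err) // localhost 8080 <nil>
}
```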
+ +package swag + +import ( + "os" + "path/filepath" + "runtime" + "strings" +) + +const ( + // GOPATHKey represents the env key for gopath + GOPATHKey = "GOPATH" +) + +// FindInSearchPath finds a package in a provided lists of paths +func FindInSearchPath(searchPath, pkg string) string { + pathsList := filepath.SplitList(searchPath) + for _, path := range pathsList { + if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil { + if _, err := os.Stat(evaluatedPath); err == nil { + return evaluatedPath + } + } + } + return "" +} + +// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT +func FindInGoSearchPath(pkg string) string { + return FindInSearchPath(FullGoSearchPath(), pkg) +} + +// FullGoSearchPath gets the search paths for finding packages +func FullGoSearchPath() string { + allPaths := os.Getenv(GOPATHKey) + if allPaths != "" { + allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":") + } else { + allPaths = runtime.GOROOT() + } + return allPaths +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go new file mode 100644 index 000000000..d8b54ee61 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/util.go @@ -0,0 +1,318 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
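
For context on the search-path helpers in path.go above, a minimal sketch (the package name passed in is illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// FullGoSearchPath is $GOPATH joined with GOROOT; FindInGoSearchPath
	// resolves a package directory under any "src" entry on that list.
	fmt.Println(swag.FullGoSearchPath())
	if dir := swag.FindInGoSearchPath("github.com/go-openapi/swag"); dir != "" {
		fmt.Println("found at", dir)
	} else {
		fmt.Println("not on the search path")
	}
}
```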
+ +package swag + +import ( + "math" + "reflect" + "regexp" + "sort" + "strings" +) + +// Taken from https://github.com/golang/lint/blob/1fab560e16097e5b69afb66eb93aab843ef77845/lint.go#L663-L698 +var commonInitialisms = map[string]bool{ + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UUID": true, + "UID": true, + "UI": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XSRF": true, + "XSS": true, +} +var initialisms []string + +func init() { + for k := range commonInitialisms { + initialisms = append(initialisms, k) + } + sort.Sort(sort.Reverse(byLength(initialisms))) +} + +// JoinByFormat joins a string array by a known format: +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func JoinByFormat(data []string, format string) []string { + if len(data) == 0 { + return data + } + var sep string + switch format { + case "ssv": + sep = " " + case "tsv": + sep = "\t" + case "pipes": + sep = "|" + case "multi": + return data + default: + sep = "," + } + return []string{strings.Join(data, sep)} +} + +// SplitByFormat splits a string by a known format: +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func SplitByFormat(data, format string) []string { + if data == "" { + return nil + } + var sep string + switch format { + case "ssv": + sep = " " + case "tsv": + sep = "\t" + case "pipes": + sep = "|" + case "multi": + return nil + default: + sep = "," + } + var result []string + for _, s := range strings.Split(data, sep) { + if ts := strings.TrimSpace(s); ts != "" { + result = append(result, ts) + } + } + return result +} + +type byLength []string + +func (s byLength) Len() int { + return len(s) +} +func (s byLength) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byLength) Less(i, j int) bool { + return len(s[i]) < len(s[j]) +} + +// Prepares strings by splitting by caps, spaces, dashes, and underscore +func split(str string) (words []string) { + repl := strings.NewReplacer( + "@", "At ", + "&", "And ", + "|", "Pipe ", + "$", "Dollar ", + "!", "Bang ", + "-", " ", + "_", " ", + ) + + rex1 := regexp.MustCompile(`(\p{Lu})`) + rex2 := regexp.MustCompile(`(\pL|\pM|\pN|\p{Pc})+`) + + str = trim(str) + + // Convert dash and underscore to spaces + str = repl.Replace(str) + + // Split when uppercase is found (needed for Snake) + str = rex1.ReplaceAllString(str, " $1") + // check if consecutive single char things make up an initialism + + for _, k := range initialisms { + str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1) + } + // Get the final list of words + words = rex2.FindAllString(str, -1) + + return +} + +// Removes leading whitespaces +func trim(str string) string { + return strings.Trim(str, " ") +} + +// Shortcut to strings.ToUpper() +func upper(str string) string { + return strings.ToUpper(trim(str)) +} + +// Shortcut to strings.ToLower() +func lower(str string) string { + return strings.ToLower(trim(str)) +} + +// ToFileName lowercases and underscores a go type name +func ToFileName(name string) string { + var out []string + for _, 
w := range split(name) { + out = append(out, lower(w)) + } + return strings.Join(out, "_") +} + +// ToCommandName lowercases and underscores a go type name +func ToCommandName(name string) string { + var out []string + for _, w := range split(name) { + out = append(out, lower(w)) + } + return strings.Join(out, "-") +} + +// ToHumanNameLower represents a code name as a human series of words +func ToHumanNameLower(name string) string { + var out []string + for _, w := range split(name) { + if !commonInitialisms[upper(w)] { + out = append(out, lower(w)) + } else { + out = append(out, w) + } + } + return strings.Join(out, " ") +} + +// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized +func ToHumanNameTitle(name string) string { + var out []string + for _, w := range split(name) { + uw := upper(w) + if !commonInitialisms[uw] { + out = append(out, upper(w[:1])+lower(w[1:])) + } else { + out = append(out, w) + } + } + return strings.Join(out, " ") +} + +// ToJSONName camelcases a name which can be underscored or pascal cased +func ToJSONName(name string) string { + var out []string + for i, w := range split(name) { + if i == 0 { + out = append(out, lower(w)) + continue + } + out = append(out, upper(w[:1])+lower(w[1:])) + } + return strings.Join(out, "") +} + +// ToVarName camelcases a name which can be underscored or pascal cased +func ToVarName(name string) string { + res := ToGoName(name) + if len(res) <= 1 { + return lower(res) + } + return lower(res[:1]) + res[1:] +} + +// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes +func ToGoName(name string) string { + var out []string + for _, w := range split(name) { + uw := upper(w) + mod := int(math.Min(float64(len(uw)), 2)) + if !commonInitialisms[uw] && !commonInitialisms[uw[:len(uw)-mod]] { + uw = upper(w[:1]) + lower(w[1:]) + } + out = append(out, uw) + } + return strings.Join(out, "") +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match +func ContainsStringsCI(coll []string, item string) bool { + for _, a := range coll { + if strings.EqualFold(a, item) { + return true + } + } + return false +} + +type zeroable interface { + IsZero() bool +} + +// IsZero returns true when the value passed into the function is a zero value. +// This allows for safer checking of interface values. 
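
A short sketch of the collection-format and name-mangling helpers defined above in util.go, with illustrative inputs; exact outputs depend on the initialism table shown earlier:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Swagger collection formats; csv is the default.
	fmt.Println(swag.SplitByFormat("a|b|c", "pipes"))         // [a b c]
	fmt.Println(swag.JoinByFormat([]string{"a", "b"}, "tsv")) // one element: "a\tb"

	// Name mangling driven by the initialism table.
	fmt.Println(swag.ToGoName("http_server_id"))   // HTTPServerID
	fmt.Println(swag.ToFileName("MyTypeName"))     // my_type_name
	fmt.Println(swag.ToJSONName("my_type_name"))   // myTypeName
	fmt.Println(swag.ContainsStringsCI([]string{"JSON", "XML"}, "json")) // true
}
```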
+func IsZero(data interface{}) bool { + // check for things that have an IsZero method instead + if vv, ok := data.(zeroable); ok { + return vv.IsZero() + } + // continue with slightly more complex reflection + v := reflect.ValueOf(data) + switch v.Kind() { + case reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + case reflect.Struct, reflect.Array: + return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) + case reflect.Invalid: + return true + } + return false +} + +// CommandLineOptionsGroup represents a group of user-defined command line options +type CommandLineOptionsGroup struct { + ShortDescription string + LongDescription string + Options interface{} +} diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/google/gofuzz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md new file mode 100644 index 000000000..64869af34 --- /dev/null +++ b/vendor/github.com/google/gofuzz/README.md @@ -0,0 +1,71 @@ +gofuzz +====== + +gofuzz is a library for populating go objects with random values. + +[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz) +[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) + +This is useful for testing: + +* Do your project's objects really serialize/unserialize correctly in all cases? +* Is there an incorrectly formatted object that will cause your project to panic? + +Import with ```import "github.com/google/gofuzz"``` + +You can use it on single variables: +```go +f := fuzz.New() +var myInt int +f.Fuzz(&myInt) // myInt gets a random value. 
+``` + +You can use it on maps: +```go +f := fuzz.New().NilChance(0).NumElements(1, 1) +var myMap map[ComplexKeyType]string +f.Fuzz(&myMap) // myMap will have exactly one element. +``` + +Customize the chance of getting a nil pointer: +```go +f := fuzz.New().NilChance(.5) +var fancyStruct struct { + A, B, C, D *string +} +f.Fuzz(&fancyStruct) // About half the pointers should be set. +``` + +You can even customize the randomization completely if needed: +```go +type MyEnum string +const ( + A MyEnum = "A" + B MyEnum = "B" +) +type MyInfo struct { + Type MyEnum + AInfo *string + BInfo *string +} + +f := fuzz.New().NilChance(0).Funcs( + func(e *MyInfo, c fuzz.Continue) { + switch c.Intn(2) { + case 0: + e.Type = A + c.Fuzz(&e.AInfo) + case 1: + e.Type = B + c.Fuzz(&e.BInfo) + } + }, +) + +var myObject MyInfo +f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. +``` + +See more examples in ```example_test.go```. + +Happy testing! diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go new file mode 100644 index 000000000..9f9956d4a --- /dev/null +++ b/vendor/github.com/google/gofuzz/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fuzz is a library for populating go objects with random values. +package fuzz diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go new file mode 100644 index 000000000..4f888fbc8 --- /dev/null +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -0,0 +1,453 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fuzz + +import ( + "fmt" + "math/rand" + "reflect" + "time" +) + +// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. +type fuzzFuncMap map[reflect.Type]reflect.Value + +// Fuzzer knows how to fill any object with random fields. +type Fuzzer struct { + fuzzFuncs fuzzFuncMap + defaultFuzzFuncs fuzzFuncMap + r *rand.Rand + nilChance float64 + minElements int + maxElements int +} + +// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, +// RandSource, NilChance, or NumElements in any order. 
+func New() *Fuzzer { + f := &Fuzzer{ + defaultFuzzFuncs: fuzzFuncMap{ + reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), + }, + + fuzzFuncs: fuzzFuncMap{}, + r: rand.New(rand.NewSource(time.Now().UnixNano())), + nilChance: .2, + minElements: 1, + maxElements: 10, + } + return f +} + +// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. +// +// Each entry in fuzzFuncs must be a function taking two parameters. +// The first parameter must be a pointer or map. It is the variable that +// function will fill with random data. The second parameter must be a +// fuzz.Continue, which will provide a source of randomness and a way +// to automatically continue fuzzing smaller pieces of the first parameter. +// +// These functions are called sensibly, e.g., if you wanted custom string +// fuzzing, the function `func(s *string, c fuzz.Continue)` would get +// called and passed the address of strings. Maps and pointers will always +// be made/new'd for you, ignoring the NilChange option. For slices, it +// doesn't make much sense to pre-create them--Fuzzer doesn't know how +// long you want your slice--so take a pointer to a slice, and make it +// yourself. (If you don't want your map/pointer type pre-made, take a +// pointer to it, and make it yourself.) See the examples for a range of +// custom functions. +func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { + for i := range fuzzFuncs { + v := reflect.ValueOf(fuzzFuncs[i]) + if v.Kind() != reflect.Func { + panic("Need only funcs!") + } + t := v.Type() + if t.NumIn() != 2 || t.NumOut() != 0 { + panic("Need 2 in and 0 out params!") + } + argT := t.In(0) + switch argT.Kind() { + case reflect.Ptr, reflect.Map: + default: + panic("fuzzFunc must take pointer or map type") + } + if t.In(1) != reflect.TypeOf(Continue{}) { + panic("fuzzFunc's second parameter must be type fuzz.Continue") + } + f.fuzzFuncs[argT] = v + } + return f +} + +// RandSource causes f to get values from the given source of randomness. +// Use if you want deterministic fuzzing. +func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { + f.r = rand.New(s) + return f +} + +// NilChance sets the probability of creating a nil pointer, map, or slice to +// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. +func (f *Fuzzer) NilChance(p float64) *Fuzzer { + if p < 0 || p > 1 { + panic("p should be between 0 and 1, inclusive.") + } + f.nilChance = p + return f +} + +// NumElements sets the minimum and maximum number of elements that will be +// added to a non-nil map or slice. +func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { + if atLeast > atMost { + panic("atLeast must be <= atMost") + } + if atLeast < 0 { + panic("atLeast must be >= 0") + } + f.minElements = atLeast + f.maxElements = atMost + return f +} + +func (f *Fuzzer) genElementCount() int { + if f.minElements == f.maxElements { + return f.minElements + } + return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) +} + +func (f *Fuzzer) genShouldFill() bool { + return f.r.Float64() > f.nilChance +} + +// Fuzz recursively fills all of obj's fields with something random. First +// this tries to find a custom fuzz function (see Funcs). If there is no +// custom function this tests whether the object implements fuzz.Interface and, +// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if +// there is a default fuzz function provided by this package. 
If all of that +// fails, this will generate random values for all primitive fields and then +// recurse for all non-primitives. +// +// Not safe for cyclic or tree-like structs! +// +// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) +// Intended for tests, so will panic on bad input or unimplemented fields. +func (f *Fuzzer) Fuzz(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.doFuzz(v, 0) +} + +// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +// Not safe for cyclic or tree-like structs! +// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) +// Intended for tests, so will panic on bad input or unimplemented fields. +func (f *Fuzzer) FuzzNoCustom(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.doFuzz(v, flagNoCustomFuzz) +} + +const ( + // Do not try to find a custom fuzz function. Does not apply recursively. + flagNoCustomFuzz uint64 = 1 << iota +) + +func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) { + if !v.CanSet() { + return + } + + if flags&flagNoCustomFuzz == 0 { + // Check for both pointer and non-pointer custom functions. + if v.CanAddr() && f.tryCustom(v.Addr()) { + return + } + if f.tryCustom(v) { + return + } + } + + if fn, ok := fillFuncMap[v.Kind()]; ok { + fn(v, f.r) + return + } + switch v.Kind() { + case reflect.Map: + if f.genShouldFill() { + v.Set(reflect.MakeMap(v.Type())) + n := f.genElementCount() + for i := 0; i < n; i++ { + key := reflect.New(v.Type().Key()).Elem() + f.doFuzz(key, 0) + val := reflect.New(v.Type().Elem()).Elem() + f.doFuzz(val, 0) + v.SetMapIndex(key, val) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Ptr: + if f.genShouldFill() { + v.Set(reflect.New(v.Type().Elem())) + f.doFuzz(v.Elem(), 0) + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Slice: + if f.genShouldFill() { + n := f.genElementCount() + v.Set(reflect.MakeSlice(v.Type(), n, n)) + for i := 0; i < n; i++ { + f.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Array: + if f.genShouldFill() { + n := v.Len() + for i := 0; i < n; i++ { + f.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + f.doFuzz(v.Field(i), 0) + } + case reflect.Chan: + fallthrough + case reflect.Func: + fallthrough + case reflect.Interface: + fallthrough + default: + panic(fmt.Sprintf("Can't handle %#v", v.Interface())) + } +} + +// tryCustom searches for custom handlers, and returns true iff it finds a match +// and successfully randomizes v. +func (f *Fuzzer) tryCustom(v reflect.Value) bool { + // First: see if we have a fuzz function for it. + doCustom, ok := f.fuzzFuncs[v.Type()] + if !ok { + // Second: see if it can fuzz itself. + if v.CanInterface() { + intf := v.Interface() + if fuzzable, ok := intf.(Interface); ok { + fuzzable.Fuzz(Continue{f: f, Rand: f.r}) + return true + } + } + // Finally: see if there is a default fuzz function. 
+ doCustom, ok = f.defaultFuzzFuncs[v.Type()] + if !ok { + return false + } + } + + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.New(v.Type().Elem())) + } + case reflect.Map: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.MakeMap(v.Type())) + } + default: + return false + } + + doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ + f: f, + Rand: f.r, + })}) + return true +} + +// Interface represents an object that knows how to fuzz itself. Any time we +// find a type that implements this interface we will delegate the act of +// fuzzing itself. +type Interface interface { + Fuzz(c Continue) +} + +// Continue can be passed to custom fuzzing functions to allow them to use +// the correct source of randomness and to continue fuzzing their members. +type Continue struct { + f *Fuzzer + + // For convenience, Continue implements rand.Rand via embedding. + // Use this for generating any randomness if you want your fuzzing + // to be repeatable for a given seed. + *rand.Rand +} + +// Fuzz continues fuzzing obj. obj must be a pointer. +func (c Continue) Fuzz(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.f.doFuzz(v, 0) +} + +// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +func (c Continue) FuzzNoCustom(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.f.doFuzz(v, flagNoCustomFuzz) +} + +// RandString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func (c Continue) RandString() string { + return randString(c.Rand) +} + +// RandUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func (c Continue) RandUint64() uint64 { + return randUint64(c.Rand) +} + +// RandBool returns true or false randomly. +func (c Continue) RandBool() bool { + return randBool(c.Rand) +} + +func fuzzInt(v reflect.Value, r *rand.Rand) { + v.SetInt(int64(randUint64(r))) +} + +func fuzzUint(v reflect.Value, r *rand.Rand) { + v.SetUint(randUint64(r)) +} + +func fuzzTime(t *time.Time, c Continue) { + var sec, nsec int64 + // Allow for about 1000 years of random time values, which keeps things + // like JSON parsing reasonably happy. 
+ sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) + c.Fuzz(&nsec) + *t = time.Unix(sec, nsec) +} + +var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ + reflect.Bool: func(v reflect.Value, r *rand.Rand) { + v.SetBool(randBool(r)) + }, + reflect.Int: fuzzInt, + reflect.Int8: fuzzInt, + reflect.Int16: fuzzInt, + reflect.Int32: fuzzInt, + reflect.Int64: fuzzInt, + reflect.Uint: fuzzUint, + reflect.Uint8: fuzzUint, + reflect.Uint16: fuzzUint, + reflect.Uint32: fuzzUint, + reflect.Uint64: fuzzUint, + reflect.Uintptr: fuzzUint, + reflect.Float32: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(float64(r.Float32())) + }, + reflect.Float64: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(r.Float64()) + }, + reflect.Complex64: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, + reflect.Complex128: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, + reflect.String: func(v reflect.Value, r *rand.Rand) { + v.SetString(randString(r)) + }, + reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, +} + +// randBool returns true or false randomly. +func randBool(r *rand.Rand) bool { + if r.Int()&1 == 1 { + return true + } + return false +} + +type charRange struct { + first, last rune +} + +// choose returns a random unicode character from the given range, using the +// given randomness source. +func (r *charRange) choose(rand *rand.Rand) rune { + count := int64(r.last - r.first) + return r.first + rune(rand.Int63n(count)) +} + +var unicodeRanges = []charRange{ + {' ', '~'}, // ASCII characters + {'\u00a0', '\u02af'}, // Multi-byte encoded characters + {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) +} + +// randString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func randString(r *rand.Rand) string { + n := r.Intn(20) + runes := make([]rune, n) + for i := range runes { + runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) + } + return string(runes) +} + +// randUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func randUint64(r *rand.Rand) uint64 { + return uint64(r.Uint32())<<32 | uint64(r.Uint32()) +} diff --git a/vendor/github.com/juju/ratelimit/LICENSE b/vendor/github.com/juju/ratelimit/LICENSE new file mode 100644 index 000000000..ade9307b3 --- /dev/null +++ b/vendor/github.com/juju/ratelimit/LICENSE @@ -0,0 +1,191 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. 
+ +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. 
You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. 
Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/github.com/juju/ratelimit/README.md b/vendor/github.com/juju/ratelimit/README.md new file mode 100644 index 000000000..a0fdfe2b1 --- /dev/null +++ b/vendor/github.com/juju/ratelimit/README.md @@ -0,0 +1,117 @@ +# ratelimit +-- + import "github.com/juju/ratelimit" + +The ratelimit package provides an efficient token bucket implementation. See +http://en.wikipedia.org/wiki/Token_bucket. + +## Usage + +#### func Reader + +```go +func Reader(r io.Reader, bucket *Bucket) io.Reader +``` +Reader returns a reader that is rate limited by the given token bucket. Each +token in the bucket represents one byte. + +#### func Writer + +```go +func Writer(w io.Writer, bucket *Bucket) io.Writer +``` +Writer returns a writer that is rate limited by the given token bucket. Each +token in the bucket represents one byte. + +#### type Bucket + +```go +type Bucket struct { +} +``` + +Bucket represents a token bucket that fills at a predetermined rate. Methods on +Bucket may be called concurrently. + +#### func NewBucket + +```go +func NewBucket(fillInterval time.Duration, capacity int64) *Bucket +``` +NewBucket returns a new token bucket that fills at the rate of one token every +fillInterval, up to the given maximum capacity. Both arguments must be positive. +The bucket is initially full. + +#### func NewBucketWithQuantum + +```go +func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket +``` +NewBucketWithQuantum is similar to NewBucket, but allows the specification of +the quantum size - quantum tokens are added every fillInterval. + +#### func NewBucketWithRate + +```go +func NewBucketWithRate(rate float64, capacity int64) *Bucket +``` +NewBucketWithRate returns a token bucket that fills the bucket at the rate of +rate tokens per second up to the given maximum capacity. Because of limited +clock resolution, at high rates, the actual rate may be up to 1% different from +the specified rate. + +#### func (*Bucket) Rate + +```go +func (tb *Bucket) Rate() float64 +``` +Rate returns the fill rate of the bucket, in tokens per second. + +#### func (*Bucket) Take + +```go +func (tb *Bucket) Take(count int64) time.Duration +``` +Take takes count tokens from the bucket without blocking. It returns the time +that the caller should wait until the tokens are actually available. 
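
The `Bucket` API documented in this README maps directly onto a simple throttling loop. Below is a minimal sketch using only the constructors and methods listed here (`NewBucket`, `Wait`, and `Reader`); the 100 ms fill interval, the capacities, and the wrapped reader are illustrative values, not anything prescribed by the package.

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// One token every 100ms with a burst capacity of 10; the bucket starts full.
	bucket := ratelimit.NewBucket(100*time.Millisecond, 10)

	// Wait blocks until a token is available, throttling this loop to roughly
	// ten operations per second once the initial burst is consumed.
	for i := 0; i < 3; i++ {
		bucket.Wait(1)
		fmt.Println("op", i, "at", time.Now().Format(time.StampMilli))
	}

	// Reader wraps an io.Reader so that each byte read consumes one token.
	src := strings.NewReader("rate limited payload")
	limited := ratelimit.Reader(src, ratelimit.NewBucket(time.Millisecond, 1024))
	if _, err := io.Copy(os.Stdout, limited); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```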
+ +Note that if the request is irrevocable - there is no way to return tokens to +the bucket once this method commits us to taking them. + +#### func (*Bucket) TakeAvailable + +```go +func (tb *Bucket) TakeAvailable(count int64) int64 +``` +TakeAvailable takes up to count immediately available tokens from the bucket. It +returns the number of tokens removed, or zero if there are no available tokens. +It does not block. + +#### func (*Bucket) TakeMaxDuration + +```go +func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) +``` +TakeMaxDuration is like Take, except that it will only take tokens from the +bucket if the wait time for the tokens is no greater than maxWait. + +If it would take longer than maxWait for the tokens to become available, it does +nothing and reports false, otherwise it returns the time that the caller should +wait until the tokens are actually available, and reports true. + +#### func (*Bucket) Wait + +```go +func (tb *Bucket) Wait(count int64) +``` +Wait takes count tokens from the bucket, waiting until they are available. + +#### func (*Bucket) WaitMaxDuration + +```go +func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool +``` +WaitMaxDuration is like Wait except that it will only take tokens from the +bucket if it needs to wait for no greater than maxWait. It reports whether any +tokens have been removed from the bucket If no tokens have been removed, it +returns immediately. diff --git a/vendor/github.com/juju/ratelimit/ratelimit.go b/vendor/github.com/juju/ratelimit/ratelimit.go new file mode 100644 index 000000000..1c3f25b2e --- /dev/null +++ b/vendor/github.com/juju/ratelimit/ratelimit.go @@ -0,0 +1,284 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3 with static-linking exception. +// See LICENCE file for details. + +// Package ratelimit provides an efficient token bucket implementation +// that can be used to limit the rate of arbitrary things. +// See http://en.wikipedia.org/wiki/Token_bucket. +package ratelimit + +import ( + "math" + "strconv" + "sync" + "time" +) + +// Bucket represents a token bucket that fills at a predetermined rate. +// Methods on Bucket may be called concurrently. +type Bucket struct { + startTime time.Time + capacity int64 + quantum int64 + fillInterval time.Duration + clock Clock + + // The mutex guards the fields following it. + mu sync.Mutex + + // avail holds the number of available tokens + // in the bucket, as of availTick ticks from startTime. + // It will be negative when there are consumers + // waiting for tokens. + avail int64 + availTick int64 +} + +// Clock is used to inject testable fakes. +type Clock interface { + Now() time.Time + Sleep(d time.Duration) +} + +// realClock implements Clock in terms of standard time functions. +type realClock struct{} + +// Now is identical to time.Now. +func (realClock) Now() time.Time { + return time.Now() +} + +// Sleep is identical to time.Sleep. +func (realClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// NewBucket returns a new token bucket that fills at the +// rate of one token every fillInterval, up to the given +// maximum capacity. Both arguments must be +// positive. The bucket is initially full. +func NewBucket(fillInterval time.Duration, capacity int64) *Bucket { + return NewBucketWithClock(fillInterval, capacity, realClock{}) +} + +// NewBucketWithClock is identical to NewBucket but injects a testable clock +// interface. 
+func NewBucketWithClock(fillInterval time.Duration, capacity int64, clock Clock) *Bucket { + return NewBucketWithQuantumAndClock(fillInterval, capacity, 1, clock) +} + +// rateMargin specifes the allowed variance of actual +// rate from specified rate. 1% seems reasonable. +const rateMargin = 0.01 + +// NewBucketWithRate returns a token bucket that fills the bucket +// at the rate of rate tokens per second up to the given +// maximum capacity. Because of limited clock resolution, +// at high rates, the actual rate may be up to 1% different from the +// specified rate. +func NewBucketWithRate(rate float64, capacity int64) *Bucket { + return NewBucketWithRateAndClock(rate, capacity, realClock{}) +} + +// NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a +// testable clock interface. +func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket { + for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) { + fillInterval := time.Duration(1e9 * float64(quantum) / rate) + if fillInterval <= 0 { + continue + } + tb := NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, clock) + if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin { + return tb + } + } + panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64)) +} + +// nextQuantum returns the next quantum to try after q. +// We grow the quantum exponentially, but slowly, so we +// get a good fit in the lower numbers. +func nextQuantum(q int64) int64 { + q1 := q * 11 / 10 + if q1 == q { + q1++ + } + return q1 +} + +// NewBucketWithQuantum is similar to NewBucket, but allows +// the specification of the quantum size - quantum tokens +// are added every fillInterval. +func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket { + return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, realClock{}) +} + +// NewBucketWithQuantumAndClock is identical to NewBucketWithQuantum but injects +// a testable clock interface. +func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket { + if fillInterval <= 0 { + panic("token bucket fill interval is not > 0") + } + if capacity <= 0 { + panic("token bucket capacity is not > 0") + } + if quantum <= 0 { + panic("token bucket quantum is not > 0") + } + return &Bucket{ + clock: clock, + startTime: clock.Now(), + capacity: capacity, + quantum: quantum, + avail: capacity, + fillInterval: fillInterval, + } +} + +// Wait takes count tokens from the bucket, waiting until they are +// available. +func (tb *Bucket) Wait(count int64) { + if d := tb.Take(count); d > 0 { + tb.clock.Sleep(d) + } +} + +// WaitMaxDuration is like Wait except that it will +// only take tokens from the bucket if it needs to wait +// for no greater than maxWait. It reports whether +// any tokens have been removed from the bucket +// If no tokens have been removed, it returns immediately. +func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool { + d, ok := tb.TakeMaxDuration(count, maxWait) + if d > 0 { + tb.clock.Sleep(d) + } + return ok +} + +const infinityDuration time.Duration = 0x7fffffffffffffff + +// Take takes count tokens from the bucket without blocking. It returns +// the time that the caller should wait until the tokens are actually +// available. +// +// Note that if the request is irrevocable - there is no way to return +// tokens to the bucket once this method commits us to taking them. 
+func (tb *Bucket) Take(count int64) time.Duration { + d, _ := tb.take(tb.clock.Now(), count, infinityDuration) + return d +} + +// TakeMaxDuration is like Take, except that +// it will only take tokens from the bucket if the wait +// time for the tokens is no greater than maxWait. +// +// If it would take longer than maxWait for the tokens +// to become available, it does nothing and reports false, +// otherwise it returns the time that the caller should +// wait until the tokens are actually available, and reports +// true. +func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) { + return tb.take(tb.clock.Now(), count, maxWait) +} + +// TakeAvailable takes up to count immediately available tokens from the +// bucket. It returns the number of tokens removed, or zero if there are +// no available tokens. It does not block. +func (tb *Bucket) TakeAvailable(count int64) int64 { + return tb.takeAvailable(tb.clock.Now(), count) +} + +// takeAvailable is the internal version of TakeAvailable - it takes the +// current time as an argument to enable easy testing. +func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 { + if count <= 0 { + return 0 + } + tb.mu.Lock() + defer tb.mu.Unlock() + + tb.adjust(now) + if tb.avail <= 0 { + return 0 + } + if count > tb.avail { + count = tb.avail + } + tb.avail -= count + return count +} + +// Available returns the number of available tokens. It will be negative +// when there are consumers waiting for tokens. Note that if this +// returns greater than zero, it does not guarantee that calls that take +// tokens from the buffer will succeed, as the number of available +// tokens could have changed in the meantime. This method is intended +// primarily for metrics reporting and debugging. +func (tb *Bucket) Available() int64 { + return tb.available(tb.clock.Now()) +} + +// available is the internal version of available - it takes the current time as +// an argument to enable easy testing. +func (tb *Bucket) available(now time.Time) int64 { + tb.mu.Lock() + defer tb.mu.Unlock() + tb.adjust(now) + return tb.avail +} + +// Capacity returns the capacity that the bucket was created with. +func (tb *Bucket) Capacity() int64 { + return tb.capacity +} + +// Rate returns the fill rate of the bucket, in tokens per second. +func (tb *Bucket) Rate() float64 { + return 1e9 * float64(tb.quantum) / float64(tb.fillInterval) +} + +// take is the internal version of Take - it takes the current time as +// an argument to enable easy testing. +func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) { + if count <= 0 { + return 0, true + } + tb.mu.Lock() + defer tb.mu.Unlock() + + currentTick := tb.adjust(now) + avail := tb.avail - count + if avail >= 0 { + tb.avail = avail + return 0, true + } + // Round up the missing tokens to the nearest multiple + // of quantum - the tokens won't be available until + // that tick. + endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum + endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval) + waitTime := endTime.Sub(now) + if waitTime > maxWait { + return 0, false + } + tb.avail = avail + return waitTime, true +} + +// adjust adjusts the current bucket capacity based on the current time. +// It returns the current tick. 
+func (tb *Bucket) adjust(now time.Time) (currentTick int64) { + currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval) + + if tb.avail >= tb.capacity { + return + } + tb.avail += (currentTick - tb.availTick) * tb.quantum + if tb.avail > tb.capacity { + tb.avail = tb.capacity + } + tb.availTick = currentTick + return +} diff --git a/vendor/github.com/juju/ratelimit/reader.go b/vendor/github.com/juju/ratelimit/reader.go new file mode 100644 index 000000000..6403bf78d --- /dev/null +++ b/vendor/github.com/juju/ratelimit/reader.go @@ -0,0 +1,51 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3 with static-linking exception. +// See LICENCE file for details. + +package ratelimit + +import "io" + +type reader struct { + r io.Reader + bucket *Bucket +} + +// Reader returns a reader that is rate limited by +// the given token bucket. Each token in the bucket +// represents one byte. +func Reader(r io.Reader, bucket *Bucket) io.Reader { + return &reader{ + r: r, + bucket: bucket, + } +} + +func (r *reader) Read(buf []byte) (int, error) { + n, err := r.r.Read(buf) + if n <= 0 { + return n, err + } + r.bucket.Wait(int64(n)) + return n, err +} + +type writer struct { + w io.Writer + bucket *Bucket +} + +// Writer returns a reader that is rate limited by +// the given token bucket. Each token in the bucket +// represents one byte. +func Writer(w io.Writer, bucket *Bucket) io.Writer { + return &writer{ + w: w, + bucket: bucket, + } +} + +func (w *writer) Write(buf []byte) (int, error) { + w.bucket.Wait(int64(len(buf))) + return w.w.Write(buf) +} diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE new file mode 100644 index 000000000..fbff658f7 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/README.md b/vendor/github.com/mailru/easyjson/README.md new file mode 100644 index 000000000..e990111fc --- /dev/null +++ b/vendor/github.com/mailru/easyjson/README.md @@ -0,0 +1,193 @@ +# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson) + +easyjson allows to (un-)marshal JSON golang structs without the use of reflection by generating marshaller code. + +One of the aims of the library is to keep generated code simple enough so that it can be easily optimized or fixed. 
Another goal is to provide users with ability to customize the generated code not available in 'encoding/json', such as generating snake_case names or enabling 'omitempty' behavior by default. + +## usage +``` +go get github.com/mailru/easyjson/... +easyjson -all .go +``` + +This will generate `_easyjson.go` with marshaller/unmarshaller methods for structs. `GOPATH` variable needs to be set up correctly, since the generation invokes a `go run` on a temporary file (this is a really convenient approach to code generation borrowed from https://github.com/pquerna/ffjson). + +## options +``` +Usage of .root/bin/easyjson: + -all + generate un-/marshallers for all structs in a file + -build_tags string + build tags to add to generated file + -leave_temps + do not delete temporary files + -no_std_marshalers + don't generate MarshalJSON/UnmarshalJSON methods + -noformat + do not run 'gofmt -w' on output file + -omit_empty + omit empty fields by default + -snake_case + use snake_case names instead of CamelCase by default + -stubs + only generate stubs for marshallers/unmarshallers methods +``` + +Using `-all` will generate (un-)marshallers for all structs in the file. By default, structs need to have a line beginning with `easyjson:json` in their docstring, e.g.: +``` +//easyjson:json +struct A{} +``` + +`-snake_case` tells easyjson to generate snake\_case field names by default (unless explicitly overriden by a field tag). The CamelCase to snake\_case conversion algorithm should work in most cases (e.g. HTTPVersion will be converted to http_version). There can be names like JSONHTTPRPC where the conversion will return an unexpected result (jsonhttprpc without underscores), but such names require a dictionary to do the conversion and may be ambiguous. + +`-build_tags` will add corresponding build tag line for the generated file. +## marshaller/unmarshaller interfaces + +easyjson generates MarshalJSON/UnmarshalJSON methods that are compatible with interfaces from 'encoding/json'. They are usable with 'json.Marshal' and 'json.Unmarshal' functions, however actually using those will result in significantly worse performance compared to custom interfaces. + +`MarshalEasyJSON` / `UnmarshalEasyJSON` methods are generated for faster parsing using custom Lexer/Writer structs (`jlexer.Lexer` and `jwriter.Writer`). The method signature is defined in `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces. These interfaces allow to avoid using any unnecessary reflection or type assertions during parsing. Functions can be used manually or with `easyjson.Marshal<...>` and `easyjson.Unmarshal<...>` helper methods. + +`jwriter.Writer` struct in addition to function for returning the data as a single slice also has methods to return the size and to send the data to an `io.Writer`. This is aimed at a typical HTTP use-case, when you want to know the `Content-Length` before actually starting to send the data. + +There are helpers in the top-level package for marhsaling/unmarshaling the data using custom interfaces to and from writers, including a helper for `http.ResponseWriter`. + +## custom types +If `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces are implemented by a type involved in JSON parsing, the type will be marshaled/unmarshaled using these methods. `easyjson.Optional` interface allows for a custom type to integrate with 'omitempty' logic. + +As an example, easyjson includes an `easyjson.RawMessage` analogous to `json.RawMessage`. 
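
As a concrete illustration of the workflow described above, the sketch below opts a struct in with the `//easyjson:json` directive and round-trips it through the top-level `easyjson.Marshal` / `easyjson.Unmarshal` helpers. The struct, its field tags, and the assumption that `easyjson -all` has already been run to produce the `*_easyjson.go` companion file are all illustrative, not part of this package.

```go
package model

import "github.com/mailru/easyjson"

//easyjson:json
type User struct {
	ID    int64  `json:"id"`
	Name  string `json:"name"`
	Email string `json:"email,omitempty"`
}

// RoundTrip marshals and unmarshals a User via the generated
// MarshalEasyJSON/UnmarshalEasyJSON methods. It compiles only after
// `easyjson -all` has generated the companion file that makes *User
// satisfy easyjson.Marshaler and easyjson.Unmarshaler.
func RoundTrip(u *User) (*User, error) {
	data, err := easyjson.Marshal(u)
	if err != nil {
		return nil, err
	}
	var out User
	if err := easyjson.Unmarshal(data, &out); err != nil {
		return nil, err
	}
	return &out, nil
}
```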
+ +Also, there are 'optional' wrappers for primitive types in `easyjson/opt` package. These are useful in the case when it is necessary to distinguish between missing and default value for the type. Wrappers allow to avoid pointers and extra heap allocations in such cases. + +## memory pooling + +The library uses a custom buffer which allocates data in increasing chunks (128-32768 bytes). Chunks of 512 bytes and larger are reused with the help of `sync.Pool`. The maximum size of a chunk is bounded to reduce redundancy in memory allocation and to make the chunks more reusable in the case of large buffer sizes. + +The buffer code is in `easyjson/buffer` package the exact values can be tweaked by a `buffer.Init()` call before the first serialization. + +## limitations +* The library is at an early stage, there are likely to be some bugs and some features of 'encoding/json' may not be supported. Please report such cases, so that they may be fixed sooner. +* Object keys are case-sensitive (unlike encodin/json). Case-insentive behavior will be implemented as an option (case-insensitive matching is slower). +* Unsafe package is used by the code. While a non-unsafe version of easyjson can be made in the future, using unsafe package simplifies a lot of code by allowing no-copy []byte to string conversion within the library. This is used only during parsing and all the returned values are allocated properly. +* Floats are currently formatted with default precision for 'strconv' package. It is obvious that it is not always the correct way to handle it, but there aren't enough use-cases for floats at hand to do anything better. +* During parsing, parts of JSON that are skipped over are not syntactically validated more than required to skip matching parentheses. +* No true streaming support for encoding/decoding. For many use-cases and protocols, data length is typically known on input and needs to be known before sending the data. + +## benchmarks +Most benchmarks were done using a sample 13kB JSON (9k if serialized back trimming the whitespace) from https://dev.twitter.com/rest/reference/get/search/tweets. The sample is very close to real-world data, quite structured and contains a variety of different types. + +For small request benchmarks, an 80-byte portion of the regular sample was used. + +For large request marshalling benchmarks, a struct containing 50 regular samples was used, making a ~500kB output JSON. + +Benchmarks are available in the repository and are run on 'make'. + +### easyjson vs. encoding/json + +easyjson seems to be 5-6 times faster than the default json serialization for unmarshalling, 3-4 times faster for non-concurrent marshalling. Concurrent marshalling is 6-7x faster if marshalling to a writer. + +### easyjson vs. ffjson + +easyjson uses the same approach for code generation as ffjson, but a significantly different approach to lexing and generated code. This allows easyjson to be 2-3x faster for unmarshalling and 1.5-2x faster for non-concurrent unmarshalling. + +ffjson seems to behave weird if used concurrently: for large request pooling hurts performance instead of boosting it, it also does not quite scale well. These issues are likely to be fixable and until that comparisons might vary from version to version a lot. + +easyjson is similar in performance for small requests and 2-5x times faster for large ones if used with a writer. + +### easyjson vs. go/codec + +github.com/ugorji/go/codec library provides compile-time helpers for JSON generation. 
In this case, helpers are not exactly marshallers as they are encoding-independent. + +easyjson is generally ~2x faster for non-concurrent benchmarks and about 3x faster for concurrent encoding (without marshalling to a writer). Unsafe option for generated helpers was used. + +As an attempt to measure marshalling performance of 'go/codec' (as opposed to allocations/memcpy/writer interface invocations), a benchmark was done with resetting lenght of a byte slice rather than resetting the whole slice to nil. However, the optimization in this exact form may not be applicable in practice, since the memory is not freed between marshalling operations. + +### easyjson vs 'ujson' python module +ujson is using C code for parsing, so it is interesting to see how plain golang compares to that. It is imporant to note that the resulting object for python is slower to access, since the library parses JSON object into dictionaries. + +easyjson seems to be slightly faster for unmarshalling (finally!) and 2-3x faster for marshalling. + +### benchmark figures +The data was measured on 4 February, 2016 using current ffjson and golang 1.6. Data for go/codec was added on 4 March 2016, benchmarked on the same machine. + +#### Unmarshalling +| lib | json size | MB/s | allocs/op | B/op +|--------|-----------|------|-----------|------- +|standard| regular | 22 | 218 | 10229 +|standard| small | 9.7 | 14 | 720 +|--------|-----------|------|-----------|------- +|easyjson| regular | 125 | 128 | 9794 +|easyjson| small | 67 | 3 | 128 +|--------|-----------|------|-----------|------- +|ffjson | regular | 66 | 141 | 9985 +|ffjson | small | 17.6 | 10 | 488 +|--------|-----------|------|-----------|------- +|codec | regular | 55 | 434 | 19299 +|codec | small | 29 | 7 | 336 +|--------|-----------|------|-----------|------- +|ujson | regular | 103 | N/A | N/A + +#### Marshalling, one goroutine. +| lib | json size | MB/s | allocs/op | B/op +|----------|-----------|------|-----------|------- +|standard | regular | 75 | 9 | 23256 +|standard | small | 32 | 3 | 328 +|standard | large | 80 | 17 | 1.2M +|----------|-----------|------|-----------|------- +|easyjson | regular | 213 | 9 | 10260 +|easyjson* | regular | 263 | 8 | 742 +|easyjson | small | 125 | 1 | 128 +|easyjson | large | 212 | 33 | 490k +|easyjson* | large | 262 | 25 | 2879 +|----------|-----------|------|-----------|------- +|ffjson | regular | 122 | 153 | 21340 +|ffjson** | regular | 146 | 152 | 4897 +|ffjson | small | 36 | 5 | 384 +|ffjson** | small | 64 | 4 | 128 +|ffjson | large | 134 | 7317 | 818k +|ffjson** | large | 125 | 7320 | 827k +|----------|-----------|------|-----------|------- +|codec | regular | 80 | 17 | 33601 +|codec*** | regular | 108 | 9 | 1153 +|codec | small | 42 | 3 | 304 +|codec*** | small | 56 | 1 | 48 +|codec | large | 73 | 483 | 2.5M +|codec*** | large | 103 | 451 | 66007 +|----------|-----------|------|-----------|------- +|ujson | regular | 92 | N/A | N/A +\* marshalling to a writer, +\*\* using `ffjson.Pool()`, +\*\*\* reusing output slice instead of resetting it to nil + +#### Marshalling, concurrent. 
+| lib | json size | MB/s | allocs/op | B/op +|----------|-----------|-------|-----------|------- +|standard | regular | 252 | 9 | 23257 +|standard | small | 124 | 3 | 328 +|standard | large | 289 | 17 | 1.2M +|----------|-----------|-------|-----------|------- +|easyjson | regular | 792 | 9 | 10597 +|easyjson* | regular | 1748 | 8 | 779 +|easyjson | small | 333 | 1 | 128 +|easyjson | large | 718 | 36 | 548k +|easyjson* | large | 2134 | 25 | 4957 +|----------|-----------|------|-----------|------- +|ffjson | regular | 301 | 153 | 21629 +|ffjson** | regular | 707 | 152 | 5148 +|ffjson | small | 62 | 5 | 384 +|ffjson** | small | 282 | 4 | 128 +|ffjson | large | 438 | 7330 | 1.0M +|ffjson** | large | 131 | 7319 | 820k +|----------|-----------|------|-----------|------- +|codec | regular | 183 | 17 | 33603 +|codec*** | regular | 671 | 9 | 1157 +|codec | small | 147 | 3 | 304 +|codec*** | small | 299 | 1 | 48 +|codec | large | 190 | 483 | 2.5M +|codec*** | large | 752 | 451 | 77574 +\* marshalling to a writer, +\*\* using `ffjson.Pool()`, +\*\*\* reusing output slice instead of resetting it to nil + + + diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go new file mode 100644 index 000000000..4de4a51db --- /dev/null +++ b/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -0,0 +1,207 @@ +// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to +// reduce copying and to allow reuse of individual chunks. +package buffer + +import ( + "io" + "sync" +) + +// PoolConfig contains configuration for the allocation and reuse strategy. +type PoolConfig struct { + StartSize int // Minimum chunk size that is allocated. + PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. + MaxSize int // Maximum chunk size that will be allocated. +} + +var config = PoolConfig{ + StartSize: 128, + PooledSize: 512, + MaxSize: 32768, +} + +// Reuse pool: chunk size -> pool. +var buffers = map[int]*sync.Pool{} + +func initBuffers() { + for l := config.PooledSize; l <= config.MaxSize; l *= 2 { + buffers[l] = new(sync.Pool) + } +} + +func init() { + initBuffers() +} + +// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. +func Init(cfg PoolConfig) { + config = cfg + initBuffers() +} + +// putBuf puts a chunk to reuse pool if it can be reused. +func putBuf(buf []byte) { + size := cap(buf) + if size < config.PooledSize { + return + } + if c := buffers[size]; c != nil { + c.Put(buf[:0]) + } +} + +// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. +func getBuf(size int) []byte { + if size < config.PooledSize { + return make([]byte, 0, size) + } + + if c := buffers[size]; c != nil { + v := c.Get() + if v != nil { + return v.([]byte) + } + } + return make([]byte, 0, size) +} + +// Buffer is a buffer optimized for serialization without extra copying. +type Buffer struct { + + // Buf is the current chunk that can be used for serialization. + Buf []byte + + toPool []byte + bufs [][]byte +} + +// EnsureSpace makes sure that the current chunk contains at least s free bytes, +// possibly creating a new chunk. +func (b *Buffer) EnsureSpace(s int) { + if cap(b.Buf)-len(b.Buf) >= s { + return + } + l := len(b.Buf) + if l > 0 { + if cap(b.toPool) != cap(b.Buf) { + // Chunk was reallocated, toPool can be pooled. 
+ putBuf(b.toPool) + } + if cap(b.bufs) == 0 { + b.bufs = make([][]byte, 0, 8) + } + b.bufs = append(b.bufs, b.Buf) + l = cap(b.toPool) * 2 + } else { + l = config.StartSize + } + + if l > config.MaxSize { + l = config.MaxSize + } + b.Buf = getBuf(l) + b.toPool = b.Buf +} + +// AppendByte appends a single byte to buffer. +func (b *Buffer) AppendByte(data byte) { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + b.Buf = append(b.Buf, data) +} + +// AppendBytes appends a byte slice to buffer. +func (b *Buffer) AppendBytes(data []byte) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// AppendBytes appends a string to buffer. +func (b *Buffer) AppendString(data string) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// Size computes the size of a buffer by adding sizes of every chunk. +func (b *Buffer) Size() int { + size := len(b.Buf) + for _, buf := range b.bufs { + size += len(buf) + } + return size +} + +// DumpTo outputs the contents of a buffer to a writer and resets the buffer. +func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { + var n int + for _, buf := range b.bufs { + if err == nil { + n, err = w.Write(buf) + written += n + } + putBuf(buf) + } + + if err == nil { + n, err = w.Write(b.Buf) + written += n + } + putBuf(b.toPool) + + b.bufs = nil + b.Buf = nil + b.toPool = nil + + return +} + +// BuildBytes creates a single byte slice with all the contents of the buffer. Data is +// copied if it does not fit in a single chunk. +func (b *Buffer) BuildBytes() []byte { + if len(b.bufs) == 0 { + + ret := b.Buf + b.toPool = nil + b.Buf = nil + + return ret + } + + ret := make([]byte, 0, b.Size()) + for _, buf := range b.bufs { + ret = append(ret, buf...) + putBuf(buf) + } + + ret = append(ret, b.Buf...) + putBuf(b.toPool) + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go new file mode 100644 index 000000000..e90ec40d0 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/error.go @@ -0,0 +1,15 @@ +package jlexer + +import "fmt" + +// LexerError implements the error interface and represents all possible errors that can be +// generated during parsing the JSON data. +type LexerError struct { + Reason string + Offset int + Data string +} + +func (l *LexerError) Error() string { + return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go new file mode 100644 index 000000000..d700c0a32 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -0,0 +1,956 @@ +// Package jlexer contains a JSON lexer implementation. +// +// It is expected that it is mostly used with generated parser code, so the interface is tuned +// for a parser that knows what kind of data is expected. +package jlexer + +import ( + "fmt" + "io" + "reflect" + "strconv" + "unicode/utf8" + "unsafe" +) + +// tokenKind determines type of a token. 
+type tokenKind byte + +const ( + tokenUndef tokenKind = iota // No token. + tokenDelim // Delimiter: one of '{', '}', '[' or ']'. + tokenString // A string literal, e.g. "abc\u1234" + tokenNumber // Number literal, e.g. 1.5e5 + tokenBool // Boolean literal: true or false. + tokenNull // null keyword. +) + +// token describes a single token: type, position in the input and value. +type token struct { + kind tokenKind // Type of a token. + + boolValue bool // Value if a boolean literal token. + byteValue []byte // Raw value of a token. + delimValue byte +} + +// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. +type Lexer struct { + Data []byte // Input data given to the lexer. + + start int // Start of the current token. + pos int // Current unscanned position in the input stream. + token token // Last scanned token, if token.kind != tokenUndef. + + firstElement bool // Whether current element is the first in array or an object. + wantSep byte // A comma or a colon character, which need to occur before a token. + + err error // Error encountered during lexing, if any. +} + +// fetchToken scans the input for the next token. +func (r *Lexer) fetchToken() { + r.token.kind = tokenUndef + r.start = r.pos + + // Check if r.Data has r.pos element + // If it doesn't, it mean corrupted input data + if len(r.Data) < r.pos { + r.errParse("Unexpected end of data") + return + } + // Determine the type of a token by skipping whitespace and reading the + // first character. + for _, c := range r.Data[r.pos:] { + switch c { + case ':', ',': + if r.wantSep == c { + r.pos++ + r.start++ + r.wantSep = 0 + } else { + r.errSyntax() + } + + case ' ', '\t', '\r', '\n': + r.pos++ + r.start++ + + case '"': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenString + r.fetchString() + return + + case '{', '[': + if r.wantSep != 0 { + r.errSyntax() + } + r.firstElement = true + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '}', ']': + if !r.firstElement && (r.wantSep != ',') { + r.errSyntax() + } + r.wantSep = 0 + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + if r.wantSep != 0 { + r.errSyntax() + } + r.token.kind = tokenNumber + r.fetchNumber() + return + + case 'n': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenNull + r.fetchNull() + return + + case 't': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = true + r.fetchTrue() + return + + case 'f': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = false + r.fetchFalse() + return + + default: + r.errSyntax() + return + } + } + r.err = io.EOF + return +} + +// isTokenEnd returns true if the char can follow a non-delimiter token +func isTokenEnd(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' +} + +// fetchNull fetches and checks remaining bytes of null keyword. +func (r *Lexer) fetchNull() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'u' || + r.Data[r.pos-2] != 'l' || + r.Data[r.pos-1] != 'l' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchTrue fetches and checks remaining bytes of true keyword. 
+func (r *Lexer) fetchTrue() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'r' || + r.Data[r.pos-2] != 'u' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchFalse fetches and checks remaining bytes of false keyword. +func (r *Lexer) fetchFalse() { + r.pos += 5 + if r.pos > len(r.Data) || + r.Data[r.pos-4] != 'a' || + r.Data[r.pos-3] != 'l' || + r.Data[r.pos-2] != 's' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 5 + r.errSyntax() + } +} + +// bytesToStr creates a string pointing at the slice to avoid copying. +// +// Warning: the string returned by the function should be used with care, as the whole input data +// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data +// may be garbage-collected even when the string exists. +func bytesToStr(data []byte) string { + h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + shdr := reflect.StringHeader{h.Data, h.Len} + return *(*string)(unsafe.Pointer(&shdr)) +} + +// fetchNumber scans a number literal token. +func (r *Lexer) fetchNumber() { + hasE := false + afterE := false + hasDot := false + + r.pos++ + for i, c := range r.Data[r.pos:] { + switch { + case c >= '0' && c <= '9': + afterE = false + case c == '.' && !hasDot: + hasDot = true + case (c == 'e' || c == 'E') && !hasE: + hasE = true + hasDot = true + afterE = true + case (c == '+' || c == '-') && afterE: + afterE = false + default: + r.pos += i + if !isTokenEnd(c) { + r.errSyntax() + } else { + r.token.byteValue = r.Data[r.start:r.pos] + } + return + } + } + + r.pos = len(r.Data) + r.token.byteValue = r.Data[r.start:] +} + +// findStringLen tries to scan into the string literal for ending quote char to determine required size. +// The size will be exact if no escapes are present and may be inexact if there are escaped chars. +func findStringLen(data []byte) (hasEscapes bool, length int) { + delta := 0 + + for i := 0; i < len(data); i++ { + switch data[i] { + case '\\': + i++ + delta++ + if i < len(data) && data[i] == 'u' { + delta++ + } + case '"': + return (delta > 0), (i - delta) + } + } + + return false, len(data) +} + +// processEscape processes a single escape sequence and returns number of bytes processed. 
+func (r *Lexer) processEscape(data []byte) (int, error) { + if len(data) < 2 { + return 0, fmt.Errorf("syntax error at %v", string(data)) + } + + c := data[1] + switch c { + case '"', '/', '\\': + r.token.byteValue = append(r.token.byteValue, c) + return 2, nil + case 'b': + r.token.byteValue = append(r.token.byteValue, '\b') + return 2, nil + case 'f': + r.token.byteValue = append(r.token.byteValue, '\f') + return 2, nil + case 'n': + r.token.byteValue = append(r.token.byteValue, '\n') + return 2, nil + case 'r': + r.token.byteValue = append(r.token.byteValue, '\r') + return 2, nil + case 't': + r.token.byteValue = append(r.token.byteValue, '\t') + return 2, nil + case 'u': + default: + return 0, fmt.Errorf("syntax error") + } + + var val rune + + for i := 2; i < len(data) && i < 6; i++ { + var v byte + c = data[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v = c - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + v = c - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + v = c - 'A' + 10 + default: + return 0, fmt.Errorf("syntax error") + } + + val <<= 4 + val |= rune(v) + } + + l := utf8.RuneLen(val) + if l == -1 { + return 0, fmt.Errorf("invalid unicode escape") + } + + var d [4]byte + utf8.EncodeRune(d[:], val) + r.token.byteValue = append(r.token.byteValue, d[:l]...) + return 6, nil +} + +// fetchString scans a string literal token. +func (r *Lexer) fetchString() { + r.pos++ + data := r.Data[r.pos:] + + hasEscapes, length := findStringLen(data) + if !hasEscapes { + r.token.byteValue = data[:length] + r.pos += length + 1 + return + } + + r.token.byteValue = make([]byte, 0, length) + p := 0 + for i := 0; i < len(data); { + switch data[i] { + case '"': + r.pos += i + 1 + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + i++ + return + + case '\\': + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + off, err := r.processEscape(data[i:]) + if err != nil { + r.errParse(err.Error()) + return + } + i += off + p = i + + default: + i++ + } + } + r.errParse("unterminated string literal") +} + +// scanToken scans the next token if no token is currently available in the lexer. +func (r *Lexer) scanToken() { + if r.token.kind != tokenUndef || r.err != nil { + return + } + + r.fetchToken() +} + +// consume resets the current token to allow scanning the next one. +func (r *Lexer) consume() { + r.token.kind = tokenUndef + r.token.delimValue = 0 +} + +// Ok returns true if no error (including io.EOF) was encountered during scanning. +func (r *Lexer) Ok() bool { + return r.err == nil +} + +const maxErrorContextLen = 13 + +func (r *Lexer) errParse(what string) { + if r.err == nil { + var str string + if len(r.Data)-r.pos <= maxErrorContextLen { + str = string(r.Data) + } else { + str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." + } + r.err = &LexerError{ + Reason: what, + Offset: r.pos, + Data: str, + } + } +} + +func (r *Lexer) errSyntax() { + r.errParse("syntax error") +} + +func (r *Lexer) errInvalidToken(expected string) { + if r.err == nil { + var str string + if len(r.token.byteValue) <= maxErrorContextLen { + str = string(r.token.byteValue) + } else { + str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." + } + r.err = &LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.pos, + Data: str, + } + } +} + +// Delim consumes a token and verifies that it is the given delimiter. 
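The error helpers just above implement a first-error-wins model: errParse stores a LexerError with the reason, the offset, and a truncated excerpt of the input, and every later read becomes a no-op that returns a zero value. A small hedged sketch of how that surfaces to a caller (the malformed input is arbitrary, and String/Int/Error are defined further down in this file):

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	// "12a" is not a valid number, so the lexer records a syntax error
	// at that offset and subsequent reads return zero values.
	in := jlexer.Lexer{Data: []byte(`{"n": 12a}`)}

	in.Delim('{')
	_ = in.String() // "n"
	in.WantColon()
	_ = in.Int() // 0; the error is recorded here

	if err, ok := in.Error().(*jlexer.LexerError); ok {
		fmt.Println(err.Reason, "at offset", err.Offset, "near", err.Data)
	}
}
```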
+func (r *Lexer) Delim(c byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.delimValue != c { + r.errInvalidToken(string([]byte{c})) + } + r.consume() +} + +// IsDelim returns true if there was no scanning error and next token is the given delimiter. +func (r *Lexer) IsDelim(c byte) bool { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + return !r.Ok() || r.token.delimValue == c +} + +// Null verifies that the next token is null and consumes it. +func (r *Lexer) Null() { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.kind != tokenNull { + r.errInvalidToken("null") + } + r.consume() +} + +// IsNull returns true if the next token is a null keyword. +func (r *Lexer) IsNull() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + return r.Ok() && r.token.kind == tokenNull +} + +// Skip skips a single token. +func (r *Lexer) Skip() { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + r.consume() +} + +// SkipRecursive skips next array or object completely, or just skips a single token if not +// an array/object. +// +// Note: no syntax validation is performed on the skipped data. +func (r *Lexer) SkipRecursive() { + r.scanToken() + + var start, end byte + + if r.token.delimValue == '{' { + start, end = '{', '}' + } else if r.token.delimValue == '[' { + start, end = '[', ']' + } else { + r.consume() + return + } + + r.consume() + + level := 1 + inQuotes := false + wasEscape := false + + for i, c := range r.Data[r.pos:] { + switch { + case c == start && !inQuotes: + level++ + case c == end && !inQuotes: + level-- + if level == 0 { + r.pos += i + 1 + return + } + case c == '\\' && inQuotes: + wasEscape = true + continue + case c == '"' && inQuotes: + inQuotes = wasEscape + case c == '"': + inQuotes = true + } + wasEscape = false + } + r.pos = len(r.Data) + r.err = io.EOF +} + +// Raw fetches the next item recursively as a data slice +func (r *Lexer) Raw() []byte { + r.SkipRecursive() + if !r.Ok() { + return nil + } + return r.Data[r.start:r.pos] +} + +// UnsafeString returns the string value if the token is a string literal. +// +// Warning: returned string may point to the input buffer, so the string should not outlive +// the input buffer. Intended pattern of usage is as an argument to a switch statement. +func (r *Lexer) UnsafeString() string { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret +} + +// String reads a string literal. +func (r *Lexer) String() string { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + + } + ret := string(r.token.byteValue) + r.consume() + return ret +} + +// Bool reads a true or false boolean keyword. 
+func (r *Lexer) Bool() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.kind != tokenBool { + r.errInvalidToken("bool") + return false + + } + ret := r.token.boolValue + r.consume() + return ret +} + +func (r *Lexer) number() string { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.kind != tokenNumber { + r.errInvalidToken("number") + return "" + + } + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret +} + +func (r *Lexer) Uint8() uint8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint8(n) +} + +func (r *Lexer) Uint16() uint16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint16(n) +} + +func (r *Lexer) Uint32() uint32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint32(n) +} + +func (r *Lexer) Uint64() uint64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return n +} + +func (r *Lexer) Uint() uint { + return uint(r.Uint64()) +} + +func (r *Lexer) Int8() int8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int8(n) +} + +func (r *Lexer) Int16() int16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int16(n) +} + +func (r *Lexer) Int32() int32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int32(n) +} + +func (r *Lexer) Int64() int64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return n +} + +func (r *Lexer) Int() int { + return int(r.Int64()) +} + +func (r *Lexer) Uint8Str() uint8 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint8(n) +} + +func (r *Lexer) Uint16Str() uint16 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint16(n) +} + +func (r *Lexer) Uint32Str() uint32 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint32(n) +} + +func (r *Lexer) Uint64Str() uint64 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return n +} + +func (r *Lexer) UintStr() uint { + return uint(r.Uint64Str()) +} + +func (r *Lexer) Int8Str() int8 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + 
return int8(n) +} + +func (r *Lexer) Int16Str() int16 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int16(n) +} + +func (r *Lexer) Int32Str() int32 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int32(n) +} + +func (r *Lexer) Int64Str() int64 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return n +} + +func (r *Lexer) IntStr() int { + return int(r.Int64Str()) +} + +func (r *Lexer) Float32() float32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return float32(n) +} + +func (r *Lexer) Float64() float64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return n +} + +func (r *Lexer) Error() error { + return r.err +} + +func (r *Lexer) AddError(e error) { + if r.err == nil { + r.err = e + } +} + +// Interface fetches an interface{} analogous to the 'encoding/json' package. +func (r *Lexer) Interface() interface{} { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + + if !r.Ok() { + return nil + } + switch r.token.kind { + case tokenString: + return r.String() + case tokenNumber: + return r.Float64() + case tokenBool: + return r.Bool() + case tokenNull: + r.Null() + return nil + } + + if r.token.delimValue == '{' { + r.consume() + + ret := map[string]interface{}{} + for !r.IsDelim('}') { + key := r.String() + r.WantColon() + ret[key] = r.Interface() + r.WantComma() + } + r.Delim('}') + + if r.Ok() { + return ret + } else { + return nil + } + } else if r.token.delimValue == '[' { + r.consume() + + var ret []interface{} + for !r.IsDelim(']') { + ret = append(ret, r.Interface()) + r.WantComma() + } + r.Delim(']') + + if r.Ok() { + return ret + } else { + return nil + } + } + r.errSyntax() + return nil +} + +// WantComma requires a comma to be present before fetching next token. +func (r *Lexer) WantComma() { + r.wantSep = ',' + r.firstElement = false +} + +// WantColon requires a colon to be present before fetching next token. +func (r *Lexer) WantColon() { + r.wantSep = ':' + r.firstElement = false +} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go new file mode 100644 index 000000000..907675f9c --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -0,0 +1,273 @@ +// Package jwriter contains a JSON writer. +package jwriter + +import ( + "io" + "strconv" + "unicode/utf8" + + "github.com/mailru/easyjson/buffer" +) + +// Writer is a JSON writer. +type Writer struct { + Error error + Buffer buffer.Buffer +} + +// Size returns the size of the data that was written out. +func (w *Writer) Size() int { + return w.Buffer.Size() +} + +// DumpTo outputs the data to given io.Writer, resetting the buffer. +func (w *Writer) DumpTo(out io.Writer) (written int, err error) { + return w.Buffer.DumpTo(out) +} + +// BuildBytes returns writer data as a single byte slice. 
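The Lexer above is designed to be driven by easyjson-generated parsers, but the same API (Delim, WantColon, WantComma, the typed readers, SkipRecursive) can be used by hand. A sketch of what generated code roughly does for a two-field struct; decodePoint and the field names are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

// decodePoint walks a JSON object the way generated easyjson code does:
// dispatch on the key, read a typed value, and skip unknown fields.
func decodePoint(data []byte) (x, y int, err error) {
	in := jlexer.Lexer{Data: data}

	in.Delim('{')
	for !in.IsDelim('}') {
		key := in.UnsafeString() // may alias the input buffer
		in.WantColon()
		switch key {
		case "x":
			x = in.Int()
		case "y":
			y = in.Int()
		default:
			in.SkipRecursive() // unknown field: skip without validating it
		}
		in.WantComma()
	}
	in.Delim('}')

	return x, y, in.Error()
}

func main() {
	x, y, err := decodePoint([]byte(`{"x": 1, "y": 2, "extra": [1, 2, 3]}`))
	fmt.Println(x, y, err)
}
```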
+func (w *Writer) BuildBytes() ([]byte, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.BuildBytes(), nil +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawByte(c byte) { + w.Buffer.AppendByte(c) +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawString(s string) { + w.Buffer.AppendString(s) +} + +// RawByte appends raw binary data to the buffer or sets the error if it is given. Useful for +// calling with results of MarshalJSON-like functions. +func (w *Writer) Raw(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.Buffer.AppendBytes(data) + default: + w.RawString("null") + } +} + +func (w *Writer) Uint8(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint16(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint32(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint64(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Int8(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int16(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int32(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int64(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Uint8Str(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint16Str(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint32Str(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintStr(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint64Str(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int8Str(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int16Str(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = 
append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int32Str(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) IntStr(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int64Str(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float32(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) +} + +func (w *Writer) Float64(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) +} + +func (w *Writer) Bool(v bool) { + w.Buffer.EnsureSpace(5) + if v { + w.Buffer.Buf = append(w.Buffer.Buf, "true"...) + } else { + w.Buffer.Buf = append(w.Buffer.Buf, "false"...) + } +} + +const chars = "0123456789abcdef" + +func (w *Writer) String(s string) { + w.Buffer.AppendByte('"') + + // Portions of the string that contain no escapes are appended as + // byte slices. + + p := 0 // last non-escape symbol + + for i := 0; i < len(s); { + // single-with character + if c := s[i]; c < utf8.RuneSelf { + var escape byte + switch c { + case '\t': + escape = 't' + case '\r': + escape = 'r' + case '\n': + escape = 'n' + case '\\': + escape = '\\' + case '"': + escape = '"' + case '<', '>': + // do nothing + default: + if c >= 0x20 { + // no escaping is required + i++ + continue + } + } + if escape != 0 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendByte('\\') + w.Buffer.AppendByte(escape) + } else { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u00`) + w.Buffer.AppendByte(chars[c>>4]) + w.Buffer.AppendByte(chars[c&0xf]) + } + i++ + p = i + continue + } + + // broken utf + runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) + if runeValue == utf8.RuneError && runeWidth == 1 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\ufffd`) + i++ + p = i + continue + } + + // jsonp stuff - tab separator and line separator + if runeValue == '\u2028' || runeValue == '\u2029' { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u202`) + w.Buffer.AppendByte(chars[runeValue&0xf]) + i += runeWidth + p = i + continue + } + i += runeWidth + } + w.Buffer.AppendString(s[p:]) + w.Buffer.AppendByte('"') +} diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/github.com/ugorji/go/LICENSE new file mode 100644 index 000000000..95a0f0541 --- /dev/null +++ b/vendor/github.com/ugorji/go/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ugorji/go/README.md b/vendor/github.com/ugorji/go/README.md new file mode 100644 index 000000000..6bb74742f --- /dev/null +++ b/vendor/github.com/ugorji/go/README.md @@ -0,0 +1,20 @@ +# go/codec + +This repository contains the `go-codec` library, +a High Performance and Feature-Rich Idiomatic encode/decode and rpc library for + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + +For more information: + + - [see the codec/Readme for quick usage information](https://github.com/ugorji/go/tree/master/codec#readme) + - [view the API on godoc](http://godoc.org/github.com/ugorji/go/codec) + - [read the detailed usage/how-to primer](http://ugorji.net/blog/go-codec-primer) + +Install using: + + go get github.com/ugorji/go/codec + diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go new file mode 100644 index 000000000..209f9ebad --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/0doc.go @@ -0,0 +1,199 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go codec/encoding library for +binc, msgpack, cbor, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +To install: + + go get github.com/ugorji/go/codec + +This package understands the 'unsafe' tag, to allow using unsafe semantics: + + - When decoding into a struct, you need to read the field name as a string + so you can find the struct field it is mapped to. + Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion. + +To install using unsafe, pass the 'unsafe' tag: + + go get -tags=unsafe github.com/ugorji/go/codec + +For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Multiple conversions: + Package coerces types where appropriate + e.g. decode an int in the stream into a float, etc. 
+ - Corner Cases: + Overflows, nil maps/slices, nil values in streams are handled correctly + - Standard field renaming via tags + - Support for omitting empty fields during an encoding + - Encoding from any value and decoding into pointer to any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Extensions to support efficient encoding/decoding of any named types + - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces + - Decoding without a schema (into a interface{}). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Encode a struct as an array, and decode struct from an array in the data stream + - Comprehensive support for anonymous fields + - Fast (no-reflection) encoding/decoding of common maps and slices + - Code-generation for faster performance. + - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats + - Support indefinite-length formats to enable true streaming + (for formats which support it e.g. json, cbor) + - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. + This mostly applies to maps, where iteration order is non-deterministic. + - NIL in data stream decoded as zero value + - Never silently skip data when decoding. + User decides whether to return an error or silently skip data when keys or indexes + in the data stream do not map to fields in the struct. + - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) + - Encode/Decode from/to chan types (for iterative streaming support) + - Drop-in replacement for encoding/json. `json:` key in struct tag supported. + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Handle unique idiosyncrasies of codecs e.g. + - For messagepack, configure how ambiguities in handling raw bytes are resolved + - For messagepack, provide rpc server/client codec to support + msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. + +The Encoder and Decoder are NOT safe for concurrent use. + +Consequently, the usage model is basically: + + - Create and initialize the Handle before any use. + Once created, DO NOT modify it. + - Multiple Encoders or Decoders can now use the Handle concurrently. + They only read information off the Handle (never write). + - However, each Encoder or Decoder MUST not be used concurrently + - To re-use an Encoder/Decoder, call Reset(...) on it first. + This allows you use state maintained on the Encoder/Decoder. 
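In practice those rules mean a Handle typically lives in a package-level variable that is configured once, while Encoders and Decoders are created (or Reset) at the point of use. A compact round-trip sketch along those lines; the event type and its tags are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// One Handle for the whole program: configured up front, then only read.
var mh codec.MsgpackHandle

type event struct {
	Name  string `codec:"name"`
	Count int    `codec:"count"`
}

func main() {
	// Encoders/Decoders are not safe for concurrent use, so build them
	// where they are needed (or pool them and Reset before reuse).
	var b []byte
	if err := codec.NewEncoderBytes(&b, &mh).Encode(event{Name: "ping", Count: 3}); err != nil {
		panic(err)
	}

	var out event
	if err := codec.NewDecoderBytes(b, &mh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}
```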
+ +Sample usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +*/ +package codec + +// Benefits of go-codec: +// +// - encoding/json always reads whole file into memory first. +// This makes it unsuitable for parsing very large files. +// - encoding/xml cannot parse into a map[string]interface{} +// I found this out on reading https://github.com/clbanning/mxj + +// TODO: +// +// - optimization for codecgen: +// if len of entity is <= 3 words, then support a value receiver for encode. +// - (En|De)coder should store an error when it occurs. +// Until reset, subsequent calls return that error that was stored. +// This means that free panics must go away. +// All errors must be raised through errorf method. +// - Decoding using a chan is good, but incurs concurrency costs. +// This is because there's no fast way to use a channel without it +// having to switch goroutines constantly. +// Callback pattern is still the best. Maybe consider supporting something like: +// type X struct { +// Name string +// Ys []Y +// Ys chan <- Y +// Ys func(Y) -> call this function for each entry +// } +// - Consider adding a isZeroer interface { isZero() bool } +// It is used within isEmpty, for omitEmpty support. +// - Consider making Handle used AS-IS within the encoding/decoding session. +// This means that we don't cache Handle information within the (En|De)coder, +// except we really need it at Reset(...) +// - Consider adding math/big support +// - Consider reducing the size of the generated functions: +// Maybe use one loop, and put the conditionals in the loop. +// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } } diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md new file mode 100644 index 000000000..91cb3a27b --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/README.md @@ -0,0 +1,148 @@ +# Codec + +High Performance, Feature-Rich Idiomatic Go codec/encoding library for +binc, msgpack, cbor, json. 
+ +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +To install: + + go get github.com/ugorji/go/codec + +This package understands the `unsafe` tag, to allow using unsafe semantics: + + - When decoding into a struct, you need to read the field name as a string + so you can find the struct field it is mapped to. + Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion. + +To use it, you must pass the `unsafe` tag during install: + +``` +go install -tags=unsafe github.com/ugorji/go/codec +``` + +Online documentation: http://godoc.org/github.com/ugorji/go/codec +Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Multiple conversions: + Package coerces types where appropriate + e.g. decode an int in the stream into a float, etc. + - Corner Cases: + Overflows, nil maps/slices, nil values in streams are handled correctly + - Standard field renaming via tags + - Support for omitting empty fields during an encoding + - Encoding from any value and decoding into pointer to any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Extensions to support efficient encoding/decoding of any named types + - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces + - Decoding without a schema (into a interface{}). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Encode a struct as an array, and decode struct from an array in the data stream + - Comprehensive support for anonymous fields + - Fast (no-reflection) encoding/decoding of common maps and slices + - Code-generation for faster performance. + - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats + - Support indefinite-length formats to enable true streaming + (for formats which support it e.g. json, cbor) + - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. + This mostly applies to maps, where iteration order is non-deterministic. + - NIL in data stream decoded as zero value + - Never silently skip data when decoding. + User decides whether to return an error or silently skip data when keys or indexes + in the data stream do not map to fields in the struct. + - Encode/Decode from/to chan types (for iterative streaming support) + - Drop-in replacement for encoding/json. `json:` key in struct tag supported. + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Handle unique idiosyncrasies of codecs e.g. + - For messagepack, configure how ambiguities in handling raw bytes are resolved + - For messagepack, provide rpc server/client codec to support + msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + +## Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. 
Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +## RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +## Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go new file mode 100644 index 000000000..33120dcb6 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/binc.go @@ -0,0 +1,929 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" + "time" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
+ +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +type bincEncDriver struct { + e *Encoder + w encWriter + m map[string]uint16 // symbols + b [scratchByteArrayLen]byte + s uint16 // symbols sequencer + encNoSeparator +} + +func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) { + if rt == timeTypId { + var bs []byte + switch x := v.(type) { + case time.Time: + bs = encodeTime(x) + case *time.Time: + bs = encodeTime(*x) + default: + e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v) + } + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) EncodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) EncodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) EncodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + bigen.PutUint64(e.b[:8], math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:8]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) EncodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + if v >= 0 { + e.encUint(bincVdPosInt<<4, true, uint64(v)) + } else if v == -1 { + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + } else { + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) EncodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + if v == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + } else if pos && v >= 1 && v <= 16 { + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + } else if v <= math.MaxUint8 { + e.w.writen2(bd|0x0, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.encIntegerPrune(bd, pos, v, 4) + } else { + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ 
*Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) EncodeArrayStart(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriver) EncodeMapStart(length int) { + e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriver) EncodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(c_UTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). + + l := len(v) + if l == 0 { + e.encBytesLen(c_UTF8, 0) + return + } else if l == 1 { + e.encBytesLen(c_UTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + } else { + e.s++ + ui = e.s + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + if l <= math.MaxUint8 { + // lenprec = 0 + } else if l <= math.MaxUint16 { + lenprec = 1 + } else if int64(l) <= math.MaxUint32 { + lenprec = 2 + } else { + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + if lenprec == 0 { + e.w.writen1(byte(l)) + } else if lenprec == 1 { + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) + } else if lenprec == 2 { + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) + } else { + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == c_RAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + if v <= math.MaxUint8 { + e.w.writen2(bd, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd | 0x02) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { + e.w.writen1(bd | 0x03) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecSymbol struct { + s string + b []byte + i uint16 +} + +type bincDecDriver struct { + d *Decoder + h *BincHandle + r decReader + br bool // bytes reader + bdRead bool + bd byte + vd byte + vs byte + noStreamingCodec + decNoSeparator + b 
[scratchByteArrayLen]byte + + // linear searching on this slice is ok, + // because we typically expect < 32 symbols in each stream. + s []bincDecSymbol +} + +func (d *bincDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true +} + +func (d *bincDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *bincDecDriver) ContainerType() (vt valueType) { + if d.vd == bincVdSpecial && d.vs == bincSpNil { + return valueTypeNil + } else if d.vd == bincVdByteArray { + return valueTypeBytes + } else if d.vd == bincVdString { + return valueTypeString + } else if d.vd == bincVdArray { + return valueTypeArray + } else if d.vd == bincVdMap { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *bincDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) { + if !d.bdRead { + d.readNextBd() + } + if rt == timeTypId { + if d.vd != bincVdTimestamp { + d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) + return + } + tt, err := decodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt + d.bdRead = false + } +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l) + return + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } + if x := d.vs & 0x7; x == bincFlBin32 { + d.decFloatPre(d.vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + } else if x == bincFlBin64 { + d.decFloatPre(d.vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + } else { + d.d.errorf("only float32 and float64 are supported. 
d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + return + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 3: + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:8]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:8])) + case 7: + d.r.readb(d.b[:8]) + v = uint64(bigen.Uint64(d.b[:8])) + default: + d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") + return + } + return +} + +func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdPosInt { + ui = d.decUint() + } else if vd == bincVdNegInt { + ui = d.decUint() + neg = true + } else if vd == bincVdSmallInt { + ui = uint64(d.vs) + 1 + } else if vd == bincVdSpecial { + if vs == bincSpZero { + //i = 0 + } else if vs == bincSpNegOne { + neg = true + ui = 1 + } else { + d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs) + return + } + } else { + d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + return + } + return +} + +func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { + ui, neg := d.decCheckInteger() + i, overflow := chkOvf.SignedInt(ui) + if overflow { + d.d.errorf("simple: overflow converting %v to signed integer", ui) + return + } + if neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("binc: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("binc: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdSpecial { + d.bdRead = false + if vs == bincSpNan { + return math.NaN() + } else if vs == bincSpPosInf { + return math.Inf(1) + } else if vs == bincSpZeroFloat || vs == bincSpZero { + return + } else if vs == bincSpNegInf { + return math.Inf(-1) + } else { + d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + return + } + } else if vd == bincVdFloat { + f = d.decFloat() + } else { + f = float64(d.DecodeInt(64)) + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("binc: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { + // b = false + } else if bd == (bincVdSpecial | bincSpTrue) { + b = true + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadMapStart() (length int) { + if d.vd != bincVdMap { + d.d.errorf("Invalid d.vd for map. Expecting 0x%x. 
Got: 0x%x", bincVdMap, d.vd) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadArrayStart() (length int) { + if d.vd != bincVdArray { + d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs > 3 { + return int(d.vs - 4) + } + return int(d.decLenNumber()) +} + +func (d *bincDecDriver) decLenNumber() (v uint64) { + if x := d.vs; x == 0 { + v = uint64(d.r.readn1()) + } else if x == 1 { + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + } else if x == 2 { + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + } else { + d.r.readb(d.b[:8]) + v = bigen.Uint64(d.b[:8]) + } + return +} + +func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return + } + var slen int = -1 + // var ok bool + switch d.vd { + case bincVdString, bincVdByteArray: + slen = d.decLen() + if zerocopy { + if d.br { + bs2 = d.r.readx(slen) + } else if len(bs) == 0 { + bs2 = decByteSlice(d.r, slen, d.b[:]) + } else { + bs2 = decByteSlice(d.r, slen, bs) + } + } else { + bs2 = decByteSlice(d.r, slen, bs) + } + if withString { + s = string(bs2) + } + case bincVdSymbol: + // zerocopy doesn't apply for symbols, + // as the values must be stored in a table for later use. + // + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint16 + vs := d.vs + if vs&0x8 == 0 { + symbol = uint16(d.r.readn1()) + } else { + symbol = uint16(bigen.Uint16(d.r.readx(2))) + } + if d.s == nil { + d.s = make([]bincDecSymbol, 0, 16) + } + + if vs&0x4 == 0 { + for i := range d.s { + j := &d.s[i] + if j.i == symbol { + bs2 = j.b + if withString { + if j.s == "" && bs2 != nil { + j.s = string(bs2) + } + s = j.s + } + break + } + } + } else { + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(bigen.Uint16(d.r.readx(2))) + case 2: + slen = int(bigen.Uint32(d.r.readx(4))) + case 3: + slen = int(bigen.Uint64(d.r.readx(8))) + } + // since using symbols, do not store any part of + // the parameter bs in the map, as it might be a shared buffer. + // bs2 = decByteSlice(d.r, slen, bs) + bs2 = decByteSlice(d.r, slen, nil) + if withString { + s = string(bs2) + } + d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) + } + default: + d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeString() (s string) { + // DecodeBytes does not accommodate symbols, whose impl stores string version in map. + // Use decStringAndBytes directly. + // return string(d.DecodeBytes(d.b[:], true, true)) + _, s = d.decStringAndBytes(d.b[:], true, true) + return +} + +func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if isstring { + bsOut, _ = d.decStringAndBytes(bs, false, zerocopy) + return + } + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return nil + } + var clen int + if d.vd == bincVdString || d.vd == bincVdByteArray { + clen = d.decLen() + } else { + d.d.errorf("Invalid d.vd for bytes. 
Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + return + } + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, bs) +} + +func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdCustomExt { + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + } else if d.vd == bincVdByteArray { + xbs = d.DecodeBytes(nil, false, true) + } else { + d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := &d.d.n + var decodeFurther bool + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + n.v = valueTypeNil + case bincSpFalse: + n.v = valueTypeBool + n.b = false + case bincSpTrue: + n.v = valueTypeBool + n.b = true + case bincSpNan: + n.v = valueTypeFloat + n.f = math.NaN() + case bincSpPosInf: + n.v = valueTypeFloat + n.f = math.Inf(1) + case bincSpNegInf: + n.v = valueTypeFloat + n.f = math.Inf(-1) + case bincSpZeroFloat: + n.v = valueTypeFloat + n.f = float64(0) + case bincSpZero: + n.v = valueTypeUint + n.u = uint64(0) // int8(0) + case bincSpNegOne: + n.v = valueTypeInt + n.i = int64(-1) // int8(-1) + default: + d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + n.v = valueTypeUint + n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + n.v = valueTypeUint + n.u = d.decUint() + case bincVdNegInt: + n.v = valueTypeInt + n.i = -(int64(d.decUint())) + case bincVdFloat: + n.v = valueTypeFloat + n.f = d.decFloat() + case bincVdSymbol: + n.v = valueTypeSymbol + n.s = d.DecodeString() + case bincVdString: + n.v = valueTypeString + n.s = d.DecodeString() + case bincVdByteArray: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case bincVdTimestamp: + n.v = valueTypeTimestamp + tt, err := decodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + n.t = tt + case bincVdCustomExt: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case bincVdArray: + n.v = valueTypeArray + decodeFurther = true + case bincVdMap: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + } + + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. 
+// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +// +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. +type BincHandle struct { + BasicHandle + binaryEncodingType +} + +func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *BincHandle) newEncDriver(e *Encoder) encDriver { + return &bincEncDriver{e: e, w: e.w} +} + +func (h *BincHandle) newDecDriver(d *Decoder) decDriver { + return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes} +} + +func (e *bincEncDriver) reset() { + e.w = e.e.w + e.s = 0 + e.m = nil +} + +func (d *bincDecDriver) reset() { + d.r = d.d.r + d.s = nil + d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 +} + +var _ decDriver = (*bincDecDriver)(nil) +var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go new file mode 100644 index 000000000..4fa349ac8 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/cbor.go @@ -0,0 +1,592 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" +) + +const ( + cborMajorUint byte = iota + cborMajorNegInt + cborMajorBytes + cborMajorText + cborMajorArray + cborMajorMap + cborMajorTag + cborMajorOther +) + +const ( + cborBdFalse byte = 0xf4 + iota + cborBdTrue + cborBdNil + cborBdUndefined + cborBdExt + cborBdFloat16 + cborBdFloat32 + cborBdFloat64 +) + +const ( + cborBdIndefiniteBytes byte = 0x5f + cborBdIndefiniteString = 0x7f + cborBdIndefiniteArray = 0x9f + cborBdIndefiniteMap = 0xbf + cborBdBreak = 0xff +) + +const ( + CborStreamBytes byte = 0x5f + CborStreamString = 0x7f + CborStreamArray = 0x9f + CborStreamMap = 0xbf + CborStreamBreak = 0xff +) + +const ( + cborBaseUint byte = 0x00 + cborBaseNegInt = 0x20 + cborBaseBytes = 0x40 + cborBaseString = 0x60 + cborBaseArray = 0x80 + cborBaseMap = 0xa0 + cborBaseTag = 0xc0 + cborBaseSimple = 0xe0 +) + +// ------------------- + +type cborEncDriver struct { + noBuiltInTypes + encNoSeparator + e *Encoder + w encWriter + h *CborHandle + x [8]byte +} + +func (e *cborEncDriver) EncodeNil() { + e.w.writen1(cborBdNil) +} + +func (e *cborEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(cborBdTrue) + } else { + e.w.writen1(cborBdFalse) + } +} + +func (e *cborEncDriver) EncodeFloat32(f float32) { + e.w.writen1(cborBdFloat32) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *cborEncDriver) EncodeFloat64(f float64) { + e.w.writen1(cborBdFloat64) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *cborEncDriver) encUint(v uint64, bd byte) { + if v <= 0x17 { + e.w.writen1(byte(v) + bd) + } else if v <= math.MaxUint8 { + e.w.writen2(bd+0x18, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 0x19) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 0x1a) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 0x1b) + bigenHelper{e.x[:8], e.w}.writeUint64(v) + } +} + +func (e *cborEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-1-v), 
cborBaseNegInt) + } else { + e.encUint(uint64(v), cborBaseUint) + } +} + +func (e *cborEncDriver) EncodeUint(v uint64) { + e.encUint(v, cborBaseUint) +} + +func (e *cborEncDriver) encLen(bd byte, length int) { + e.encUint(uint64(length), bd) +} + +func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + e.encUint(uint64(xtag), cborBaseTag) + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + e.encUint(uint64(re.Tag), cborBaseTag) + if re.Data != nil { + en.encode(re.Data) + } else if re.Value == nil { + e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *cborEncDriver) EncodeArrayStart(length int) { + e.encLen(cborBaseArray, length) +} + +func (e *cborEncDriver) EncodeMapStart(length int) { + e.encLen(cborBaseMap, length) +} + +func (e *cborEncDriver) EncodeString(c charEncoding, v string) { + e.encLen(cborBaseString, len(v)) + e.w.writestr(v) +} + +func (e *cborEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if c == c_RAW { + e.encLen(cborBaseBytes, len(v)) + } else { + e.encLen(cborBaseString, len(v)) + } + e.w.writeb(v) +} + +// ---------------------- + +type cborDecDriver struct { + d *Decoder + h *CborHandle + r decReader + b [scratchByteArrayLen]byte + br bool // bytes reader + bdRead bool + bd byte + noBuiltInTypes + decNoSeparator +} + +func (d *cborDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *cborDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *cborDecDriver) ContainerType() (vt valueType) { + if d.bd == cborBdNil { + return valueTypeNil + } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { + return valueTypeBytes + } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { + return valueTypeString + } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + return valueTypeArray + } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *cborDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + // treat Nil and Undefined as nil values + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) CheckBreak() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdBreak { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) decUint() (ui uint64) { + v := d.bd & 0x1f + if v <= 0x17 { + ui = uint64(v) + } else { + if v == 0x18 { + ui = uint64(d.r.readn1()) + } else if v == 0x19 { + ui = uint64(bigen.Uint16(d.r.readx(2))) + } else if v == 0x1a { + ui = uint64(bigen.Uint32(d.r.readx(4))) + } else if v == 0x1b { + ui = uint64(bigen.Uint64(d.r.readx(8))) + } else { + d.d.errorf("decUint: Invalid descriptor: %v", d.bd) + return + } + } + return +} + +func (d *cborDecDriver) decCheckInteger() (neg bool) { + if !d.bdRead { + d.readNextBd() + } + major := d.bd >> 5 + if major == cborMajorUint { + } else if major == cborMajorNegInt { + neg = true + } else { + d.d.errorf("invalid major: %v (bd: %v)", major, d.bd) + return + } + return +} + +func (d 
*cborDecDriver) DecodeInt(bitsize uint8) (i int64) { + neg := d.decCheckInteger() + ui := d.decUint() + // check if this number can be converted to an int without overflow + var overflow bool + if neg { + if i, overflow = chkOvf.SignedInt(ui + 1); overflow { + d.d.errorf("cbor: overflow converting %v to signed integer", ui+1) + return + } + i = -i + } else { + if i, overflow = chkOvf.SignedInt(ui); overflow { + d.d.errorf("cbor: overflow converting %v to signed integer", ui) + return + } + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("cbor: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + if d.decCheckInteger() { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + ui = d.decUint() + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("cbor: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdFloat16 { + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) + } else if bd == cborBdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if bd == cborBdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else if bd >= cborBaseUint && bd < cborBaseBytes { + f = float64(d.DecodeInt(64)) + } else { + d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd) + return + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("cbor: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *cborDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdTrue { + b = true + } else if bd == cborBdFalse { + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) ReadMapStart() (length int) { + d.bdRead = false + if d.bd == cborBdIndefiniteMap { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) ReadArrayStart() (length int) { + d.bdRead = false + if d.bd == cborBdIndefiniteArray { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) decLen() int { + return int(d.decUint()) +} + +func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { + d.bdRead = false + for { + if d.CheckBreak() { + break + } + if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { + d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd) + return nil + } + n := d.decLen() + oldLen := len(bs) + newLen := oldLen + n + if newLen > cap(bs) { + bs2 := make([]byte, newLen, 2*cap(bs)+n) + copy(bs2, bs) + bs = bs2 + } else { + bs = bs[:newLen] + } + d.r.readb(bs[oldLen:newLen]) + // bs = append(bs, d.r.readn()...) 
+ d.bdRead = false + } + d.bdRead = false + return bs +} + +func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return nil + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + if bs == nil { + return d.decAppendIndefiniteBytes(nil) + } + return d.decAppendIndefiniteBytes(bs[:0]) + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, bs) +} + +func (d *cborDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true, true)) +} + +func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if !d.bdRead { + d.readNextBd() + } + u := d.decUint() + d.bdRead = false + realxtag = u + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + d.d.decode(&re.Value) + } else if xtag != realxtag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag) + return + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := &d.d.n + var decodeFurther bool + + switch d.bd { + case cborBdNil: + n.v = valueTypeNil + case cborBdFalse: + n.v = valueTypeBool + n.b = false + case cborBdTrue: + n.v = valueTypeBool + n.b = true + case cborBdFloat16, cborBdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat(true) + case cborBdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat(false) + case cborBdIndefiniteBytes: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case cborBdIndefiniteString: + n.v = valueTypeString + n.s = d.DecodeString() + case cborBdIndefiniteArray: + n.v = valueTypeArray + decodeFurther = true + case cborBdIndefiniteMap: + n.v = valueTypeMap + decodeFurther = true + default: + switch { + case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt(64) + } else { + n.v = valueTypeUint + n.u = d.DecodeUint(64) + } + case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: + n.v = valueTypeInt + n.i = d.DecodeInt(64) + case d.bd >= cborBaseBytes && d.bd < cborBaseString: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case d.bd >= cborBaseString && d.bd < cborBaseArray: + n.v = valueTypeString + n.s = d.DecodeString() + case d.bd >= cborBaseArray && d.bd < cborBaseMap: + n.v = valueTypeArray + decodeFurther = true + case d.bd >= cborBaseMap && d.bd < cborBaseTag: + n.v = valueTypeMap + decodeFurther = true + case d.bd >= cborBaseTag && d.bd < cborBaseSimple: + n.v = valueTypeExt + n.u = d.decUint() + n.l = nil + // d.bdRead = false + // d.d.decode(&re.Value) // handled by decode itself. + // decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + return + } + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +// ------------------------- + +// CborHandle is a Handle for the CBOR encoding format, +// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . +// +// CBOR is comprehensively supported, including support for: +// - indefinite-length arrays/maps/bytes/strings +// - (extension) tags in range 0..0xffff (0 .. 
65535) +// - half, single and double-precision floats +// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) +// - nil, true, false, ... +// - arrays and maps, bytes and text strings +// +// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. +// Users can implement them as needed (using SetExt), including spec-documented ones: +// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. +// +// To encode with indefinite lengths (streaming), users will use +// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants. +// +// For example, to encode "one-byte" as an indefinite length string: +// var buf bytes.Buffer +// e := NewEncoder(&buf, new(CborHandle)) +// buf.WriteByte(CborStreamString) +// e.MustEncode("one-") +// e.MustEncode("byte") +// buf.WriteByte(CborStreamBreak) +// encodedBytes := buf.Bytes() +// var vv interface{} +// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv) +// // Now, vv contains the same string "one-byte" +// +type CborHandle struct { + binaryEncodingType + BasicHandle +} + +func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + +func (h *CborHandle) newEncDriver(e *Encoder) encDriver { + return &cborEncDriver{e: e, w: e.w, h: h} +} + +func (h *CborHandle) newDecDriver(d *Decoder) decDriver { + return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes} +} + +func (e *cborEncDriver) reset() { + e.w = e.e.w +} + +func (d *cborDecDriver) reset() { + d.r = d.d.r + d.bd, d.bdRead = 0, false +} + +var _ decDriver = (*cborDecDriver)(nil) +var _ encDriver = (*cborEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go new file mode 100644 index 000000000..52c1dfe83 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode.go @@ -0,0 +1,2053 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "errors" + "fmt" + "io" + "reflect" + "time" +) + +// Some tagging information for error messages. +const ( + msgBadDesc = "Unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +var ( + onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct") + cannotDecodeIntoNilErr = errors.New("cannot decode into nil") +) + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +type decReader interface { + unreadn1() + + // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR + // just return a view of the []byte being decoded from. + // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. + readx(n int) []byte + readb([]byte) + readn1() uint8 + readn1eof() (v uint8, eof bool) + numread() int // number of bytes read + track() + stopTrack() []byte +} + +type decReaderByteScanner interface { + io.Reader + io.ByteScanner +} + +type decDriver interface { + // this will check if the next token is a break. + CheckBreak() bool + TryDecodeAsNil() bool + // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. 
+ ContainerType() (vt valueType) + IsBuiltinType(rt uintptr) bool + DecodeBuiltin(rt uintptr, v interface{}) + + // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. + // For maps and arrays, it will not do the decoding in-band, but will signal + // the decoder, so that is done later, by setting the decNaked.valueType field. + // + // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + // for extensions, DecodeNaked must read the tag and the []byte if it exists. + // if the []byte is not read, then kInterfaceNaked will treat it as a Handle + // that stores the subsequent value in-band, and complete reading the RawExt. + // + // extensions should also use readx to decode them, for efficiency. + // kInterface will extract the detached byte slice if it has to pass it outside its realm. + DecodeNaked() + DecodeInt(bitsize uint8) (i int64) + DecodeUint(bitsize uint8) (ui uint64) + DecodeFloat(chkOverflow32 bool) (f float64) + DecodeBool() (b bool) + // DecodeString can also decode symbols. + // It looks redundant as DecodeBytes is available. + // However, some codecs (e.g. binc) support symbols and can + // return a pre-stored string value, meaning that it can bypass + // the cost of []byte->string conversion. + DecodeString() (s string) + + // DecodeBytes may be called directly, without going through reflection. + // Consequently, it must be designed to handle possible nil. + DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) + + // decodeExt will decode into a *RawExt or into an extension. + DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) + // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + ReadMapStart() int + ReadArrayStart() int + + reset() + uncacheRead() +} + +type decNoSeparator struct { +} + +func (_ decNoSeparator) ReadEnd() {} + +// func (_ decNoSeparator) uncacheRead() {} + +type DecodeOptions struct { + // MapType specifies type to use during schema-less decoding of a map in the stream. + // If nil, we use map[interface{}]interface{} + MapType reflect.Type + + // SliceType specifies type to use during schema-less decoding of an array in the stream. + // If nil, we use []interface{} + SliceType reflect.Type + + // MaxInitLen defines the initial length that we "make" a collection (slice, chan or map) with. + // If 0 or negative, we default to a sensible value based on the size of an element in the collection. + // + // For example, when decoding, a stream may say that it has MAX_UINT elements. + // We should not auto-matically provision a slice of that length, to prevent Out-Of-Memory crash. + // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. + MaxInitLen int + + // If ErrorIfNoField, return an error when decoding a map + // from a codec stream into a struct, and no matching struct field is found. + ErrorIfNoField bool + + // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. + // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, + // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). + ErrorIfNoArrayExpand bool + + // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). + SignedInteger bool + + // MapValueReset controls how we decode into a map value. + // + // By default, we MAY retrieve the mapping for a key, and then decode into that. 
+ // However, especially with big maps, that retrieval may be expensive and unnecessary + // if the stream already contains all that is necessary to recreate the value. + // + // If true, we will never retrieve the previous mapping, + // but rather decode into a new value and set that in the map. + // + // If false, we will retrieve the previous mapping if necessary e.g. + // the previous mapping is a pointer, or is a struct or array with pre-set state, + // or is an interface. + MapValueReset bool + + // InterfaceReset controls how we decode into an interface. + // + // By default, when we see a field that is an interface{...}, + // or a map with interface{...} value, we will attempt decoding into the + // "contained" value. + // + // However, this prevents us from reading a string into an interface{} + // that formerly contained a number. + // + // If true, we will decode into a new "blank" value, and set that in the interface. + // If false, we will decode into whatever is contained in the interface. + InterfaceReset bool + + // InternString controls interning of strings during decoding. + // + // Some handles, e.g. json, typically will read map keys as strings. + // If the set of keys are finite, it may help reduce allocation to + // look them up from a map (than to allocate them afresh). + // + // Note: Handles will be smart when using the intern functionality. + // So everything will not be interned. + InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. + PreferArrayOverSlice bool +} + +// ------------------------------------ + +// ioDecByteScanner implements Read(), ReadByte(...), UnreadByte(...) methods +// of io.Reader, io.ByteScanner. +type ioDecByteScanner struct { + r io.Reader + l byte // last byte + ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread + b [1]byte // tiny buffer for reading single bytes +} + +func (z *ioDecByteScanner) Read(p []byte) (n int, err error) { + var firstByte bool + if z.ls == 1 { + z.ls = 2 + p[0] = z.l + if len(p) == 1 { + n = 1 + return + } + firstByte = true + p = p[1:] + } + n, err = z.r.Read(p) + if n > 0 { + if err == io.EOF && n == len(p) { + err = nil // read was successful, so postpone EOF (till next time) + } + z.l = p[n-1] + z.ls = 2 + } + if firstByte { + n++ + } + return +} + +func (z *ioDecByteScanner) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:]) + if n == 1 { + c = z.b[0] + if err == io.EOF { + err = nil // read was successful, so postpone EOF (till next time) + } + } + return +} + +func (z *ioDecByteScanner) UnreadByte() (err error) { + x := z.ls + if x == 0 { + err = errors.New("cannot unread - nothing has been read") + } else if x == 1 { + err = errors.New("cannot unread - last byte has not been read") + } else if x == 2 { + z.ls = 1 + } + return +} + +// ioDecReader is a decReader that reads off an io.Reader +type ioDecReader struct { + br decReaderByteScanner + // temp byte array re-used internally for efficiency during read. + // shares buffer with Decoder, so we keep size of struct within 8 words. 
+ x *[scratchByteArrayLen]byte + bs ioDecByteScanner + n int // num read + tr []byte // tracking bytes read + trb bool +} + +func (z *ioDecReader) numread() int { + return z.n +} + +func (z *ioDecReader) readx(n int) (bs []byte) { + if n <= 0 { + return + } + if n < len(z.x) { + bs = z.x[:n] + } else { + bs = make([]byte, n) + } + if _, err := io.ReadAtLeast(z.br, bs, n); err != nil { + panic(err) + } + z.n += len(bs) + if z.trb { + z.tr = append(z.tr, bs...) + } + return +} + +func (z *ioDecReader) readb(bs []byte) { + if len(bs) == 0 { + return + } + n, err := io.ReadAtLeast(z.br, bs, len(bs)) + z.n += n + if err != nil { + panic(err) + } + if z.trb { + z.tr = append(z.tr, bs...) + } +} + +func (z *ioDecReader) readn1() (b uint8) { + b, err := z.br.ReadByte() + if err != nil { + panic(err) + } + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return b +} + +func (z *ioDecReader) readn1eof() (b uint8, eof bool) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else if err == io.EOF { + eof = true + } else { + panic(err) + } + return +} + +func (z *ioDecReader) unreadn1() { + err := z.br.UnreadByte() + if err != nil { + panic(err) + } + z.n-- + if z.trb { + if l := len(z.tr) - 1; l >= 0 { + z.tr = z.tr[:l] + } + } +} + +func (z *ioDecReader) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *ioDecReader) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ------------------------------------ + +var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read") + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c int // cursor + a int // available + t int // track start +} + +func (z *bytesDecReader) reset(in []byte) { + z.b = in + z.a = len(in) + z.c = 0 + z.t = 0 +} + +func (z *bytesDecReader) numread() int { + return z.c +} + +func (z *bytesDecReader) unreadn1() { + if z.c == 0 || len(z.b) == 0 { + panic(bytesDecReaderCannotUnreadErr) + } + z.c-- + z.a++ + return +} + +func (z *bytesDecReader) readx(n int) (bs []byte) { + // slicing from a non-constant start position is more expensive, + // as more computation is required to decipher the pointer start position. + // However, we do it only once, and it's better than reslicing both z.b and return value. 
+ + if n <= 0 { + } else if z.a == 0 { + panic(io.EOF) + } else if n > z.a { + panic(io.ErrUnexpectedEOF) + } else { + c0 := z.c + z.c = c0 + n + z.a = z.a - n + bs = z.b[c0:z.c] + } + return +} + +func (z *bytesDecReader) readn1() (v uint8) { + if z.a == 0 { + panic(io.EOF) + } + v = z.b[z.c] + z.c++ + z.a-- + return +} + +func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { + if z.a == 0 { + eof = true + return + } + v = z.b[z.c] + z.c++ + z.a-- + return +} + +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readx(len(bs))) +} + +func (z *bytesDecReader) track() { + z.t = z.c +} + +func (z *bytesDecReader) stopTrack() (bs []byte) { + return z.b[z.t:z.c] +} + +// ------------------------------------ + +type decFnInfo struct { + d *Decoder + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType +} + +// ---------------------------------------- + +type decFn struct { + i decFnInfo + f func(*decFnInfo, reflect.Value) +} + +func (f *decFnInfo) builtin(rv reflect.Value) { + f.d.d.DecodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +} + +func (f *decFnInfo) rawExt(rv reflect.Value) { + f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil) +} + +func (f *decFnInfo) raw(rv reflect.Value) { + rv.SetBytes(f.d.raw()) +} + +func (f *decFnInfo) ext(rv reflect.Value) { + f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn) +} + +func (f *decFnInfo) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) { + if indir == -1 { + v = rv.Addr().Interface() + } else if indir == 0 { + v = rv.Interface() + } else { + for j := int8(0); j < indir; j++ { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + v = rv.Interface() + } + return +} + +func (f *decFnInfo) selferUnmarshal(rv reflect.Value) { + f.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(f.d) +} + +func (f *decFnInfo) binaryUnmarshal(rv reflect.Value) { + bm := f.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler) + xbs := f.d.d.DecodeBytes(nil, false, true) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) textUnmarshal(rv reflect.Value) { + tm := f.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) + if fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) { + tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler) + // bs := f.d.d.DecodeBytes(f.d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. 
+ fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) kErr(rv reflect.Value) { + f.d.errorf("no decoding function defined for kind %v", rv.Kind()) +} + +func (f *decFnInfo) kString(rv reflect.Value) { + rv.SetString(f.d.d.DecodeString()) +} + +func (f *decFnInfo) kBool(rv reflect.Value) { + rv.SetBool(f.d.d.DecodeBool()) +} + +func (f *decFnInfo) kInt(rv reflect.Value) { + rv.SetInt(f.d.d.DecodeInt(intBitsize)) +} + +func (f *decFnInfo) kInt64(rv reflect.Value) { + rv.SetInt(f.d.d.DecodeInt(64)) +} + +func (f *decFnInfo) kInt32(rv reflect.Value) { + rv.SetInt(f.d.d.DecodeInt(32)) +} + +func (f *decFnInfo) kInt8(rv reflect.Value) { + rv.SetInt(f.d.d.DecodeInt(8)) +} + +func (f *decFnInfo) kInt16(rv reflect.Value) { + rv.SetInt(f.d.d.DecodeInt(16)) +} + +func (f *decFnInfo) kFloat32(rv reflect.Value) { + rv.SetFloat(f.d.d.DecodeFloat(true)) +} + +func (f *decFnInfo) kFloat64(rv reflect.Value) { + rv.SetFloat(f.d.d.DecodeFloat(false)) +} + +func (f *decFnInfo) kUint8(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(8)) +} + +func (f *decFnInfo) kUint64(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(64)) +} + +func (f *decFnInfo) kUint(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(uintBitsize)) +} + +func (f *decFnInfo) kUintptr(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(uintBitsize)) +} + +func (f *decFnInfo) kUint32(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(32)) +} + +func (f *decFnInfo) kUint16(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(16)) +} + +// func (f *decFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called") +// if rv.IsNil() { +// rv.Set(reflect.New(rv.Type().Elem())) +// } +// f.d.decodeValue(rv.Elem()) +// } + +// var kIntfCtr uint64 + +func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { + // nil interface: + // use some hieristics to decode it appropriately + // based on the detected next value in the stream. + d := f.d + d.d.DecodeNaked() + n := &d.n + if n.v == valueTypeNil { + return + } + // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader). 
+ // if num := f.ti.rt.NumMethod(); num > 0 { + if f.ti.numMeth > 0 { + d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + return + } + // var useRvn bool + switch n.v { + case valueTypeMap: + // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp { + // } else if d.h.MapType == mapStrIntfTyp { // for json performance + // } + if d.mtid == 0 || d.mtid == mapIntfIntfTypId { + l := len(n.ms) + n.ms = append(n.ms, nil) + var v2 interface{} = &n.ms[l] + d.decode(v2) + rvn = reflect.ValueOf(v2).Elem() + n.ms = n.ms[:l] + } else if d.mtid == mapStrIntfTypId { // for json performance + l := len(n.ns) + n.ns = append(n.ns, nil) + var v2 interface{} = &n.ns[l] + d.decode(v2) + rvn = reflect.ValueOf(v2).Elem() + n.ns = n.ns[:l] + } else { + rvn = reflect.New(d.h.MapType).Elem() + d.decodeValue(rvn, nil) + } + case valueTypeArray: + // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp { + if d.stid == 0 || d.stid == intfSliceTypId { + l := len(n.ss) + n.ss = append(n.ss, nil) + var v2 interface{} = &n.ss[l] + d.decode(v2) + n.ss = n.ss[:l] + rvn = reflect.ValueOf(v2).Elem() + if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { + rvn = reflectArrayOf(rvn) + } + } else { + rvn = reflect.New(d.h.SliceType).Elem() + d.decodeValue(rvn, nil) + } + case valueTypeExt: + var v interface{} + tag, bytes := n.u, n.l // calling decode below might taint the values + if bytes == nil { + l := len(n.is) + n.is = append(n.is, nil) + v2 := &n.is[l] + d.decode(v2) + v = *v2 + n.is = n.is[:l] + } + bfn := d.h.getExtForTag(tag) + if bfn == nil { + var re RawExt + re.Tag = tag + re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) + rvn = reflect.ValueOf(re) + } else { + rvnA := reflect.New(bfn.rt) + rvn = rvnA.Elem() + if bytes != nil { + bfn.ext.ReadExt(rvnA.Interface(), bytes) + } else { + bfn.ext.UpdateExt(rvnA.Interface(), v) + } + } + case valueTypeNil: + // no-op + case valueTypeInt: + rvn = reflect.ValueOf(&n.i).Elem() + case valueTypeUint: + rvn = reflect.ValueOf(&n.u).Elem() + case valueTypeFloat: + rvn = reflect.ValueOf(&n.f).Elem() + case valueTypeBool: + rvn = reflect.ValueOf(&n.b).Elem() + case valueTypeString, valueTypeSymbol: + rvn = reflect.ValueOf(&n.s).Elem() + case valueTypeBytes: + rvn = reflect.ValueOf(&n.l).Elem() + case valueTypeTimestamp: + rvn = reflect.ValueOf(&n.t).Elem() + default: + panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v)) + } + return +} + +func (f *decFnInfo) kInterface(rv reflect.Value) { + // debugf("\t===> kInterface") + + // Note: + // A consequence of how kInterface works, is that + // if an interface already contains something, we try + // to decode into what was there before. + // We do not replace with a generic value (as got from decodeNaked). + + var rvn reflect.Value + if rv.IsNil() { + rvn = f.kInterfaceNaked() + if rvn.IsValid() { + rv.Set(rvn) + } + } else if f.d.h.InterfaceReset { + rvn = f.kInterfaceNaked() + if rvn.IsValid() { + rv.Set(rvn) + } else { + // reset to zero value based on current type in there. + rv.Set(reflect.Zero(rv.Elem().Type())) + } + } else { + rvn = rv.Elem() + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we have to set the reflect.Value directly. + // if underlying type is settable (e.g. ptr or interface), + // we just decode into it. + // Else we create a settable value, decode into it, and set on the interface. 
+ if rvn.CanSet() { + f.d.decodeValue(rvn, nil) + } else { + rvn2 := reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + f.d.decodeValue(rvn2, nil) + rv.Set(rvn2) + } + } +} + +func (f *decFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + d := f.d + dd := d.d + cr := d.cr + ctyp := dd.ContainerType() + if ctyp == valueTypeMap { + containerLen := dd.ReadMapStart() + if containerLen == 0 { + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return + } + tisfi := fti.sfi + hasLen := containerLen >= 0 + if hasLen { + for j := 0; j < containerLen; j++ { + // rvkencname := dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) + // rvksi := ti.getForEncName(rvkencname) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(si.field(rv, true), nil) + } + } else { + d.structFieldNotFound(-1, rvkencname) + } + } + } else { + for j := 0; !dd.CheckBreak(); j++ { + // rvkencname := dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) + // rvksi := ti.getForEncName(rvkencname) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(si.field(rv, true), nil) + } + } else { + d.structFieldNotFound(-1, rvkencname) + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + } else if ctyp == valueTypeArray { + containerLen := dd.ReadArrayStart() + if containerLen == 0 { + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + return + } + // Not much gain from doing it two ways for array. + // Arrays are not used as much for structs. + hasLen := containerLen >= 0 + for j, si := range fti.sfip { + if hasLen { + if j == containerLen { + break + } + } else if dd.CheckBreak() { + break + } + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(si.field(rv, true), nil) + } + } + if containerLen > len(fti.sfip) { + // read remaining values and throw away + for j := len(fti.sfip); j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + d.structFieldNotFound(j, "") + } + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + } else { + f.d.error(onlyMapOrArrayCanDecodeIntoStructErr) + return + } +} + +func (f *decFnInfo) kSlice(rv reflect.Value) { + // A slice can be set from a map or array in stream. + // This way, the order can be kept (as order is lost with map). 
+ ti := f.ti + d := f.d + dd := d.d + rtelem0 := ti.rt.Elem() + ctyp := dd.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + // you can only decode bytes or string in the stream into a slice or array of bytes + if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { + f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) + } + if f.seq == seqTypeChan { + bs2 := dd.DecodeBytes(nil, false, true) + ch := rv.Interface().(chan<- byte) + for _, b := range bs2 { + ch <- b + } + } else { + rvbs := rv.Bytes() + bs2 := dd.DecodeBytes(rvbs, false, false) + if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { + if rv.CanSet() { + rv.SetBytes(bs2) + } else { + copy(rvbs, bs2) + } + } + } + return + } + + // array := f.seq == seqTypeChan + + slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) + + // // an array can never return a nil slice. so no need to check f.array here. + if containerLenS == 0 { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } + } + slh.End() + return + } + + rtelem := rtelem0 + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + fn := d.getDecFn(rtelem, true, true) + + var rv0, rv9 reflect.Value + rv0 = rv + rvChanged := false + + // for j := 0; j < containerLenS; j++ { + var rvlen int + if containerLenS > 0 { // hasLen + if f.seq == seqTypeChan { + if rv.IsNil() { + rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) + rv.Set(reflect.MakeChan(ti.rt, rvlen)) + } + // handle chan specially: + for j := 0; j < containerLenS; j++ { + rv9 = reflect.New(rtelem0).Elem() + slh.ElemContainerState(j) + d.decodeValue(rv9, fn) + rv.Send(rv9) + } + } else { // slice or array + var truncated bool // says len of sequence is not same as expected number of elements + numToRead := containerLenS // if truncated, reset numToRead + + rvcap := rv.Cap() + rvlen = rv.Len() + if containerLenS > rvcap { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, containerLenS) + } else { + oldRvlenGtZero := rvlen > 0 + rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) + if truncated { + if rvlen <= rvcap { + rv.SetLen(rvlen) + } else { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } + } else { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) + } + rvcap = rvlen + } + numToRead = rvlen + } else if containerLenS != rvlen { + if f.seq == seqTypeSlice { + rv.SetLen(containerLenS) + rvlen = containerLenS + } + } + j := 0 + // we read up to the numToRead + for ; j < numToRead; j++ { + slh.ElemContainerState(j) + d.decodeValue(rv.Index(j), fn) + } + + // if slice, expand and read up to containerLenS (or EOF) iff truncated + // if array, swallow all the rest. 
+ + if f.seq == seqTypeArray { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } else if truncated { // slice was truncated, as chan NOT in this block + for ; j < containerLenS; j++ { + rv = expandSliceValue(rv, 1) + rv9 = rv.Index(j) + if resetSliceElemToZeroValue { + rv9.Set(reflect.Zero(rtelem0)) + } + slh.ElemContainerState(j) + d.decodeValue(rv9, fn) + } + } + } + } else { + rvlen = rv.Len() + j := 0 + for ; !dd.CheckBreak(); j++ { + if f.seq == seqTypeChan { + slh.ElemContainerState(j) + rv9 = reflect.New(rtelem0).Elem() + d.decodeValue(rv9, fn) + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs + rv = expandSliceValue(rv, 1) + rv9 = rv.Index(j) + // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0)) + if resetSliceElemToZeroValue { + rv9.Set(reflect.Zero(rtelem0)) + } + rvlen++ + rvChanged = true + } + } else { // slice or array + rv9 = rv.Index(j) + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { // seqTypeSlice + d.decodeValue(rv9, fn) + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + rv.SetLen(j) + } else if j == 0 && rv.IsNil() { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true + } + } + } + slh.End() + + if rvChanged { + rv0.Set(rv) + } +} + +func (f *decFnInfo) kArray(rv reflect.Value) { + // f.d.decodeValue(rv.Slice(0, rv.Len())) + f.kSlice(rv.Slice(0, rv.Len())) +} + +func (f *decFnInfo) kMap(rv reflect.Value) { + d := f.d + dd := d.d + containerLen := dd.ReadMapStart() + cr := d.cr + ti := f.ti + if rv.IsNil() { + rv.Set(reflect.MakeMap(ti.rt)) + } + + if containerLen == 0 { + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return + } + + ktype, vtype := ti.rt.Key(), ti.rt.Elem() + ktypeId := reflect.ValueOf(ktype).Pointer() + vtypeKind := vtype.Kind() + var keyFn, valFn *decFn + var xtyp reflect.Type + for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { + } + keyFn = d.getDecFn(xtyp, true, true) + for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { + } + valFn = d.getDecFn(xtyp, true, true) + var mapGet, mapSet bool + if !f.d.h.MapValueReset { + // if pointer, mapGet = true + // if interface, mapGet = true if !DecodeNakedAlways (else false) + // if builtin, mapGet = false + // else mapGet = true + if vtypeKind == reflect.Ptr { + mapGet = true + } else if vtypeKind == reflect.Interface { + if !f.d.h.InterfaceReset { + mapGet = true + } + } else if !isImmutableKind(vtypeKind) { + mapGet = true + } + } + + var rvk, rvv, rvz reflect.Value + + // for j := 0; j < containerLen; j++ { + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + rvk = reflect.New(ktype).Elem() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + d.decodeValue(rvk, keyFn) + + // special case if a byte array. 
+ if ktypeId == intfTypId { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(d.string(rvk.Bytes())) + } + } + mapSet = true // set to false if u do a get, and its a pointer, and exists + if mapGet { + rvv = rv.MapIndex(rvk) + if rvv.IsValid() { + if vtypeKind == reflect.Ptr { + mapSet = false + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + d.decodeValue(rvv, valFn) + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + } + } else { + for j := 0; !dd.CheckBreak(); j++ { + rvk = reflect.New(ktype).Elem() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + d.decodeValue(rvk, keyFn) + + // special case if a byte array. + if ktypeId == intfTypId { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(d.string(rvk.Bytes())) + } + } + mapSet = true // set to false if u do a get, and its a pointer, and exists + if mapGet { + rvv = rv.MapIndex(rvk) + if rvv.IsValid() { + if vtypeKind == reflect.Ptr { + mapSet = false + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + d.decodeValue(rvv, valFn) + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +type decRtidFn struct { + rtid uintptr + fn decFn +} + +// decNaked is used to keep track of the primitives decoded. +// Without it, we would have to decode each primitive and wrap it +// in an interface{}, causing an allocation. +// In this model, the primitives are decoded in a "pseudo-atomic" fashion, +// so we can rest assured that no other decoding happens while these +// primitives are being decoded. +// +// maps and arrays are not handled by this mechanism. +// However, RawExt is, and we accommodate for extensions that decode +// RawExt from DecodeNaked, but need to decode the value subsequently. +// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. +// +// However, decNaked also keeps some arrays of default maps and slices +// used in DecodeNaked. This way, we can get a pointer to it +// without causing a new heap allocation. +// +// kInterfaceNaked will ensure that there is no allocation for the common +// uses. +type decNaked struct { + // r RawExt // used for RawExt, uint, []byte. + u uint64 + i int64 + f float64 + l []byte + s string + t time.Time + b bool + v valueType + + // stacks for reducing allocation + is []interface{} + ms []map[interface{}]interface{} + ns []map[string]interface{} + ss [][]interface{} + // rs []RawExt + + // keep arrays at the bottom? Chance is that they are not used much. + ia [4]interface{} + ma [4]map[interface{}]interface{} + na [4]map[string]interface{} + sa [4][]interface{} + // ra [2]RawExt +} + +func (n *decNaked) reset() { + if n.ss != nil { + n.ss = n.ss[:0] + } + if n.is != nil { + n.is = n.is[:0] + } + if n.ms != nil { + n.ms = n.ms[:0] + } + if n.ns != nil { + n.ns = n.ns[:0] + } +} + +// A Decoder reads and decodes an object from an input stream in the codec format. 
+type Decoder struct { + // hopefully, reduce derefencing cost by laying the decReader inside the Decoder. + // Try to put things that go together to fit within a cache line (8 words). + + d decDriver + // NOTE: Decoder shouldn't call it's read methods, + // as the handler MAY need to do some coordination. + r decReader + // sa [initCollectionCap]decRtidFn + h *BasicHandle + hh Handle + + be bool // is binary encoding + bytes bool // is bytes reader + js bool // is json handle + + rb bytesDecReader + ri ioDecReader + cr containerStateRecv + + s []decRtidFn + f map[uintptr]*decFn + + // _ uintptr // for alignment purposes, so next one starts from a cache line + + // cache the mapTypeId and sliceTypeId for faster comparisons + mtid uintptr + stid uintptr + + n decNaked + b [scratchByteArrayLen]byte + is map[string]string // used for interning strings +} + +// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. +// +// For efficiency, Users are encouraged to pass in a memory buffered reader +// (eg bufio.Reader, bytes.Buffer). +func NewDecoder(r io.Reader, h Handle) *Decoder { + d := newDecoder(h) + d.Reset(r) + return d +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. +func NewDecoderBytes(in []byte, h Handle) *Decoder { + d := newDecoder(h) + d.ResetBytes(in) + return d +} + +func newDecoder(h Handle) *Decoder { + d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} + n := &d.n + // n.rs = n.ra[:0] + n.ms = n.ma[:0] + n.is = n.ia[:0] + n.ns = n.na[:0] + n.ss = n.sa[:0] + _, d.js = h.(*JsonHandle) + if d.h.InternString { + d.is = make(map[string]string, 32) + } + d.d = h.newDecDriver(d) + d.cr, _ = d.d.(containerStateRecv) + // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri}) + return d +} + +func (d *Decoder) resetCommon() { + d.n.reset() + d.d.reset() + // reset all things which were cached from the Handle, + // but could be changed. + d.mtid, d.stid = 0, 0 + if d.h.MapType != nil { + d.mtid = reflect.ValueOf(d.h.MapType).Pointer() + } + if d.h.SliceType != nil { + d.stid = reflect.ValueOf(d.h.SliceType).Pointer() + } +} + +func (d *Decoder) Reset(r io.Reader) { + d.ri.x = &d.b + // d.s = d.sa[:0] + d.ri.bs.r = r + var ok bool + d.ri.br, ok = r.(decReaderByteScanner) + if !ok { + d.ri.br = &d.ri.bs + } + d.r = &d.ri + d.resetCommon() +} + +func (d *Decoder) ResetBytes(in []byte) { + // d.s = d.sa[:0] + d.rb.reset(in) + d.r = &d.rb + d.resetCommon() +} + +// func (d *Decoder) sendContainerState(c containerState) { +// if d.cr != nil { +// d.cr.sendContainerState(c) +// } +// } + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. v cannot be a nil pointer. v can also be +// a reflect.Value of a pointer. +// +// Note that a pointer to a nil interface is not a nil pointer. +// If you do not know what type of stream it is, pass in a pointer to a nil interface. +// We will decode and store a value in that nil interface. +// +// Sample usages: +// // Decoding into a non-nil typed value +// var f float32 +// err = codec.NewDecoder(r, handle).Decode(&f) +// +// // Decoding into nil interface +// var v interface{} +// dec := codec.NewDecoder(r, handle) +// err = dec.Decode(&v) +// +// When decoding into a nil interface{}, we will decode into an appropriate value based +// on the contents of the stream: +// - Numbers are decoded as float64, int64 or uint64. 
+// - Other values are decoded appropriately depending on the type: +// bool, string, []byte, time.Time, etc +// - Extensions are decoded as RawExt (if no ext function registered for the tag) +// Configurations exist on the Handle to override defaults +// (e.g. for MapType, SliceType and how to decode raw bytes). +// +// When decoding into a non-nil interface{} value, the mode of encoding is based on the +// type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error +// - Else decode it based on its reflect.Kind +// +// There are some special rules when decoding into containers (slice/array/map/struct). +// Decode will typically use the stream contents to UPDATE the container. +// - A map can be decoded from a stream map, by updating matching keys. +// - A slice can be decoded from a stream array, +// by updating the first n elements, where n is length of the stream. +// - A slice can be decoded from a stream map, by decoding as if +// it contains a sequence of key-value pairs. +// - A struct can be decoded from a stream map, by updating matching fields. +// - A struct can be decoded from a stream array, +// by updating fields as they occur in the struct (by index). +// +// When decoding a stream map or array with length of 0 into a nil map or slice, +// we reset the destination map or slice to a zero-length value. +// +// However, when decoding a stream nil, we reset the destination container +// to its "zero" value (e.g. nil for slice/map, etc). +// +func (d *Decoder) Decode(v interface{}) (err error) { + defer panicToErr(&err) + d.decode(v) + return +} + +// this is not a smart swallow, as it allocates objects and does unnecessary work. +func (d *Decoder) swallowViaHammer() { + var blank interface{} + d.decodeValue(reflect.ValueOf(&blank).Elem(), nil) +} + +func (d *Decoder) swallow() { + // smarter decode that just swallows the content + dd := d.d + if dd.TryDecodeAsNil() { + return + } + cr := d.cr + switch dd.ContainerType() { + case valueTypeMap: + containerLen := dd.ReadMapStart() + clenGtEqualZero := containerLen >= 0 + for j := 0; ; j++ { + if clenGtEqualZero { + if j >= containerLen { + break + } + } else if dd.CheckBreak() { + break + } + if cr != nil { + cr.sendContainerState(containerMapKey) + } + d.swallow() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + d.swallow() + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + case valueTypeArray: + containerLenS := dd.ReadArrayStart() + clenGtEqualZero := containerLenS >= 0 + for j := 0; ; j++ { + if clenGtEqualZero { + if j >= containerLenS { + break + } + } else if dd.CheckBreak() { + break + } + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + d.swallow() + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + case valueTypeBytes: + dd.DecodeBytes(d.b[:], false, true) + case valueTypeString: + dd.DecodeBytes(d.b[:], true, true) + // dd.DecodeStringAsBytes(d.b[:]) + default: + // these are all primitives, which we can get from decodeNaked + // if RawExt using Value, complete the processing. + dd.DecodeNaked() + if n := &d.n; n.v == valueTypeExt && n.l == nil { + l := len(n.is) + n.is = append(n.is, nil) + v2 := &n.is[l] + d.decode(v2) + n.is = n.is[:l] + } + } +} + +// MustDecode is like Decode, but panics if unable to Decode. +// This provides insight to the code location that triggered the error. 
+func (d *Decoder) MustDecode(v interface{}) { + d.decode(v) +} + +func (d *Decoder) decode(iv interface{}) { + // if ics, ok := iv.(Selfer); ok { + // ics.CodecDecodeSelf(d) + // return + // } + + if d.d.TryDecodeAsNil() { + switch v := iv.(type) { + case nil: + case *string: + *v = "" + case *bool: + *v = false + case *int: + *v = 0 + case *int8: + *v = 0 + case *int16: + *v = 0 + case *int32: + *v = 0 + case *int64: + *v = 0 + case *uint: + *v = 0 + case *uint8: + *v = 0 + case *uint16: + *v = 0 + case *uint32: + *v = 0 + case *uint64: + *v = 0 + case *float32: + *v = 0 + case *float64: + *v = 0 + case *[]uint8: + *v = nil + case *Raw: + *v = nil + case reflect.Value: + if v.Kind() != reflect.Ptr || v.IsNil() { + d.errNotValidPtrValue(v) + } + // d.chkPtrValue(v) + v = v.Elem() + if v.IsValid() { + v.Set(reflect.Zero(v.Type())) + } + default: + rv := reflect.ValueOf(iv) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + d.errNotValidPtrValue(rv) + } + // d.chkPtrValue(rv) + rv = rv.Elem() + if rv.IsValid() { + rv.Set(reflect.Zero(rv.Type())) + } + } + return + } + + switch v := iv.(type) { + case nil: + d.error(cannotDecodeIntoNilErr) + return + + case Selfer: + v.CodecDecodeSelf(d) + + case reflect.Value: + if v.Kind() != reflect.Ptr || v.IsNil() { + d.errNotValidPtrValue(v) + } + // d.chkPtrValue(v) + d.decodeValueNotNil(v.Elem(), nil) + + case *string: + *v = d.d.DecodeString() + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(d.d.DecodeInt(intBitsize)) + case *int8: + *v = int8(d.d.DecodeInt(8)) + case *int16: + *v = int16(d.d.DecodeInt(16)) + case *int32: + *v = int32(d.d.DecodeInt(32)) + case *int64: + *v = d.d.DecodeInt(64) + case *uint: + *v = uint(d.d.DecodeUint(uintBitsize)) + case *uint8: + *v = uint8(d.d.DecodeUint(8)) + case *uint16: + *v = uint16(d.d.DecodeUint(16)) + case *uint32: + *v = uint32(d.d.DecodeUint(32)) + case *uint64: + *v = d.d.DecodeUint(64) + case *float32: + *v = float32(d.d.DecodeFloat(true)) + case *float64: + *v = d.d.DecodeFloat(false) + case *[]uint8: + *v = d.d.DecodeBytes(*v, false, false) + + case *Raw: + *v = d.raw() + + case *interface{}: + d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil) + + default: + if !fastpathDecodeTypeSwitch(iv, d) { + d.decodeI(iv, true, false, false, false) + } + } +} + +func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Value, proceed bool) { + if tryNil && d.d.TryDecodeAsNil() { + // No need to check if a ptr, recursively, to determine + // whether to set value to nil. + // Just always set value to its zero type. + if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid + rv.Set(reflect.Zero(rv.Type())) + } + return + } + + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. 
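+	// Walk any pointer indirections, allocating nil pointers along the way,
+	// so that the value returned is the addressable base (non-pointer) value.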
+ for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + return rv, true +} + +func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) { + rv := reflect.ValueOf(iv) + if checkPtr { + if rv.Kind() != reflect.Ptr || rv.IsNil() { + d.errNotValidPtrValue(rv) + } + // d.chkPtrValue(rv) + } + rv, proceed := d.preDecodeValue(rv, tryNil) + if proceed { + fn := d.getDecFn(rv.Type(), checkFastpath, checkCodecSelfer) + fn.f(&fn.i, rv) + } +} + +func (d *Decoder) decodeValue(rv reflect.Value, fn *decFn) { + if rv, proceed := d.preDecodeValue(rv, true); proceed { + if fn == nil { + fn = d.getDecFn(rv.Type(), true, true) + } + fn.f(&fn.i, rv) + } +} + +func (d *Decoder) decodeValueNotNil(rv reflect.Value, fn *decFn) { + if rv, proceed := d.preDecodeValue(rv, false); proceed { + if fn == nil { + fn = d.getDecFn(rv.Type(), true, true) + } + fn.f(&fn.i, rv) + } +} + +func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *decFn) { + rtid := reflect.ValueOf(rt).Pointer() + + // retrieve or register a focus'ed function for this type + // to eliminate need to do the retrieval multiple times + + // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } + var ok bool + if useMapForCodecCache { + fn, ok = d.f[rtid] + } else { + for i := range d.s { + v := &(d.s[i]) + if v.rtid == rtid { + fn, ok = &(v.fn), true + break + } + } + } + if ok { + return + } + + if useMapForCodecCache { + if d.f == nil { + d.f = make(map[uintptr]*decFn, initCollectionCap) + } + fn = new(decFn) + d.f[rtid] = fn + } else { + if d.s == nil { + d.s = make([]decRtidFn, 0, initCollectionCap) + } + d.s = append(d.s, decRtidFn{rtid: rtid}) + fn = &(d.s[len(d.s)-1]).fn + } + + // debugf("\tCreating new dec fn for type: %v\n", rt) + ti := d.h.getTypeInfo(rtid, rt) + fi := &(fn.i) + fi.d = d + fi.ti = ti + + // An extension can be registered for any type, regardless of the Kind + // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. + // + // We can't check if it's an extension byte here first, because the user may have + // registered a pointer or non-pointer type, meaning we may have to recurse first + // before matching a mapped type, even though the extension byte is already detected. + // + // NOTE: if decoding into a nil interface{}, we return a non-nil + // value except even if the container registers a length of 0. 
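+	// Resolve the decode function in priority order: Selfer, RawExt/Raw, builtin
+	// types, registered extensions, Binary/JSON/Text unmarshalers, and finally a
+	// kind-based fallback (with a fastpath for common un-named map/slice types).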
+ if checkCodecSelfer && ti.cs { + fn.f = (*decFnInfo).selferUnmarshal + } else if rtid == rawExtTypId { + fn.f = (*decFnInfo).rawExt + } else if rtid == rawTypId { + fn.f = (*decFnInfo).raw + } else if d.d.IsBuiltinType(rtid) { + fn.f = (*decFnInfo).builtin + } else if xfFn := d.h.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.f = (*decFnInfo).ext + } else if supportMarshalInterfaces && d.be && ti.bunm { + fn.f = (*decFnInfo).binaryUnmarshal + } else if supportMarshalInterfaces && !d.be && d.js && ti.junm { + //If JSON, we should check JSONUnmarshal before textUnmarshal + fn.f = (*decFnInfo).jsonUnmarshal + } else if supportMarshalInterfaces && !d.be && ti.tunm { + fn.f = (*decFnInfo).textUnmarshal + } else { + rk := rt.Kind() + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if rt.PkgPath() == "" { + if idx := fastpathAV.index(rtid); idx != -1 { + fn.f = fastpathAV[idx].decfn + } + } else { + // use mapping for underlying type if there + ok = false + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(rt.Key(), rt.Elem()) + } else { + rtu = reflect.SliceOf(rt.Elem()) + } + rtuid := reflect.ValueOf(rtu).Pointer() + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].decfn + xrt := fastpathAV[idx].rt + fn.f = func(xf *decFnInfo, xrv reflect.Value) { + // xfnf(xf, xrv.Convert(xrt)) + xfnf(xf, xrv.Addr().Convert(reflect.PtrTo(xrt)).Elem()) + } + } + } + } + if fn.f == nil { + switch rk { + case reflect.String: + fn.f = (*decFnInfo).kString + case reflect.Bool: + fn.f = (*decFnInfo).kBool + case reflect.Int: + fn.f = (*decFnInfo).kInt + case reflect.Int64: + fn.f = (*decFnInfo).kInt64 + case reflect.Int32: + fn.f = (*decFnInfo).kInt32 + case reflect.Int8: + fn.f = (*decFnInfo).kInt8 + case reflect.Int16: + fn.f = (*decFnInfo).kInt16 + case reflect.Float32: + fn.f = (*decFnInfo).kFloat32 + case reflect.Float64: + fn.f = (*decFnInfo).kFloat64 + case reflect.Uint8: + fn.f = (*decFnInfo).kUint8 + case reflect.Uint64: + fn.f = (*decFnInfo).kUint64 + case reflect.Uint: + fn.f = (*decFnInfo).kUint + case reflect.Uint32: + fn.f = (*decFnInfo).kUint32 + case reflect.Uint16: + fn.f = (*decFnInfo).kUint16 + // case reflect.Ptr: + // fn.f = (*decFnInfo).kPtr + case reflect.Uintptr: + fn.f = (*decFnInfo).kUintptr + case reflect.Interface: + fn.f = (*decFnInfo).kInterface + case reflect.Struct: + fn.f = (*decFnInfo).kStruct + case reflect.Chan: + fi.seq = seqTypeChan + fn.f = (*decFnInfo).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.f = (*decFnInfo).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.f = (*decFnInfo).kArray + case reflect.Map: + fn.f = (*decFnInfo).kMap + default: + fn.f = (*decFnInfo).kErr + } + } + } + + return +} + +func (d *Decoder) structFieldNotFound(index int, rvkencname string) { + // NOTE: rvkencname may be a stringView, so don't pass it to another function. 
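+	// Depending on ErrorIfNoField, either report the unmatched field as an error
+	// or silently swallow its value from the stream.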
+ if d.h.ErrorIfNoField { + if index >= 0 { + d.errorf("no matching struct field found when decoding stream array at index %v", index) + return + } else if rvkencname != "" { + d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) + return + } + } + d.swallow() +} + +func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { + if d.h.ErrorIfNoArrayExpand { + d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) + } +} + +func (d *Decoder) chkPtrValue(rv reflect.Value) { + // We can only decode into a non-nil pointer + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + return + } + d.errNotValidPtrValue(rv) +} + +func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { + if !rv.IsValid() { + d.error(cannotDecodeIntoNilErr) + return + } + if !rv.CanInterface() { + d.errorf("cannot decode into a value without an interface: %v", rv) + return + } + rvi := rv.Interface() + d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) +} + +func (d *Decoder) error(err error) { + panic(err) +} + +func (d *Decoder) errorf(format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = d.r.numread() + copy(params2[1:], params) + err := fmt.Errorf("[pos %d]: "+format, params2...) + panic(err) +} + +func (d *Decoder) string(v []byte) (s string) { + if d.is != nil { + s, ok := d.is[string(v)] // no allocation here. + if !ok { + s = string(v) + d.is[s] = s + } + return s + } + return string(v) // don't return stringView, as we need a real string here. +} + +func (d *Decoder) intern(s string) { + if d.is != nil { + d.is[s] = s + } +} + +// nextValueBytes returns the next value in the stream as a set of bytes. +func (d *Decoder) nextValueBytes() []byte { + d.d.uncacheRead() + d.r.track() + d.swallow() + return d.r.stopTrack() +} + +func (d *Decoder) raw() []byte { + // ensure that this is not a view into the bytes + // i.e. make new copy always. + bs := d.nextValueBytes() + bs2 := make([]byte, len(bs)) + copy(bs2, bs) + return bs2 +} + +// -------------------------------------------------- + +// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. +// A slice can be set from a map or array in stream. This supports the MapBySlice interface. 
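+// It also forwards container state events (map key/value or array element) to the driver.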
+type decSliceHelper struct { + d *Decoder + // ct valueType + array bool +} + +func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { + dd := d.d + ctyp := dd.ContainerType() + if ctyp == valueTypeArray { + x.array = true + clen = dd.ReadArrayStart() + } else if ctyp == valueTypeMap { + clen = dd.ReadMapStart() * 2 + } else { + d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) + } + // x.ct = ctyp + x.d = d + return +} + +func (x decSliceHelper) End() { + cr := x.d.cr + if cr == nil { + return + } + if x.array { + cr.sendContainerState(containerArrayEnd) + } else { + cr.sendContainerState(containerMapEnd) + } +} + +func (x decSliceHelper) ElemContainerState(index int) { + cr := x.d.cr + if cr == nil { + return + } + if x.array { + cr.sendContainerState(containerArrayElem) + } else { + if index%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } +} + +func decByteSlice(r decReader, clen int, bs []byte) (bsOut []byte) { + if clen == 0 { + return zeroByteSlice + } + if len(bs) == clen { + bsOut = bs + } else if cap(bs) >= clen { + bsOut = bs[:clen] + } else { + bsOut = make([]byte, clen) + } + r.readb(bsOut) + return +} + +func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { + if xlen := len(in); xlen > 0 { + if isBytesReader || xlen <= scratchByteArrayLen { + if cap(dest) >= xlen { + out = dest[:xlen] + } else { + out = make([]byte, xlen) + } + copy(out, in) + return + } + } + return in +} + +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { + // handle when maxlen is not set i.e. <= 0 + if clen <= 0 { + return + } + if maxlen <= 0 { + // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. + // maxlen = 256 * 1024 / unit + // if maxlen < (4 * 1024) { + // maxlen = 4 * 1024 + // } + if unit < (256 / 4) { + maxlen = 256 * 1024 / unit + } else { + maxlen = 4 * 1024 + } + } + if clen > maxlen { + rvlen = maxlen + truncated = true + } else { + rvlen = clen + } + return + // if clen <= 0 { + // rvlen = 0 + // } else if maxlen > 0 && clen > maxlen { + // rvlen = maxlen + // truncated = true + // } else { + // rvlen = clen + // } + // return +} + +// // implement overall decReader wrapping both, for possible use inline: +// type decReaderT struct { +// bytes bool +// rb *bytesDecReader +// ri *ioDecReader +// } +// +// // implement *Decoder as a decReader. +// // Using decReaderT (defined just above) caused performance degradation +// // possibly because of constant copying the value, +// // and some value->interface conversion causing allocation. +// func (d *Decoder) unreadn1() { +// if d.bytes { +// d.rb.unreadn1() +// } else { +// d.ri.unreadn1() +// } +// } +// ... for other methods of decReader. +// Testing showed that performance improvement was negligible. diff --git a/vendor/github.com/ugorji/go/codec/decode_go.go b/vendor/github.com/ugorji/go/codec/decode_go.go new file mode 100644 index 000000000..ba289cef6 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode_go.go @@ -0,0 +1,16 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// +build go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = true + +func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { + rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + return +} diff --git a/vendor/github.com/ugorji/go/codec/decode_go14.go b/vendor/github.com/ugorji/go/codec/decode_go14.go new file mode 100644 index 000000000..50063bc8f --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode_go14.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = false + +func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { + panic("reflect.ArrayOf unsupported") +} diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go new file mode 100644 index 000000000..c2cef812e --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/encode.go @@ -0,0 +1,1461 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "fmt" + "io" + "reflect" + "sort" + "sync" +) + +const ( + defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 +) + +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 + +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota + + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracts writing to a byte array or to an io.Writer. 
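+// It is implemented by ioEncWriter (backed by an io.Writer) and bytesEncWriter (in-memory byte slice).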
+type encWriter interface { + writeb([]byte) + writestr(string) + writen1(byte) + writen2(byte, byte) + atEndOfEncode() +} + +// encDriver abstracts the actual codec (binc vs msgpack, etc) +type encDriver interface { + IsBuiltinType(rt uintptr) bool + EncodeBuiltin(rt uintptr, v interface{}) + EncodeNil() + EncodeInt(i int64) + EncodeUint(i uint64) + EncodeBool(b bool) + EncodeFloat32(f float32) + EncodeFloat64(f float64) + // encodeExtPreamble(xtag byte, length int) + EncodeRawExt(re *RawExt, e *Encoder) + EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) + EncodeArrayStart(length int) + EncodeMapStart(length int) + EncodeString(c charEncoding, v string) + EncodeSymbol(v string) + EncodeStringBytes(c charEncoding, v []byte) + //TODO + //encBignum(f *big.Int) + //encStringRunes(c charEncoding, v []rune) + + reset() +} + +type encDriverAsis interface { + EncodeAsis(v []byte) +} + +type encNoSeparator struct{} + +func (_ encNoSeparator) EncodeEnd() {} + +type ioEncWriterWriter interface { + WriteByte(c byte) error + WriteString(s string) (n int, err error) + Write(p []byte) (n int, err error) +} + +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} + +type EncodeOptions struct { + // Encode a struct as an array, and not as a map + StructToArray bool + + // Canonical representation means that encoding a value will always result in the same + // sequence of bytes. + // + // This only affects maps, as the iteration order for maps is random. + // + // The implementation MAY use the natural sort order for the map keys if possible: + // + // - If there is a natural sort order (ie for number, bool, string or []byte keys), + // then the map keys are first sorted in natural order and then written + // with corresponding map values to the strema. + // - If there is no natural sort order, then the map keys will first be + // encoded into []byte, and then sorted, + // before writing the sorted keys and the corresponding map values to the stream. + // + Canonical bool + + // CheckCircularRef controls whether we check for circular references + // and error fast during an encode. + // + // If enabled, an error is received if a pointer to a struct + // references itself either directly or through one of its fields (iteratively). + // + // This is opt-in, as there may be a performance hit to checking circular references. + CheckCircularRef bool + + // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers + // when checking if a value is empty. + // + // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls. + RecursiveEmptyCheck bool + + // Raw controls whether we encode Raw values. + // This is a "dangerous" option and must be explicitly set. + // If set, we blindly encode Raw values as-is, without checking + // if they are a correct representation of a value in that format. + // If unset, we error out. + Raw bool + + // AsSymbols defines what should be encoded as symbols. + // + // Encoding as symbols can reduce the encoded size significantly. + // + // However, during decoding, each string to be encoded as a symbol must + // be checked to see if it has been seen before. Consequently, encoding time + // will increase if using symbols, because string comparisons has a clear cost. 
+ // + // Sample values: + // AsSymbolNone + // AsSymbolAll + // AsSymbolMapStringKeys + // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + AsSymbols AsSymbolFlag +} + +// --------------------------------------------- + +type simpleIoEncWriterWriter struct { + w io.Writer + bw io.ByteWriter + sw ioEncStringWriter + bs [1]byte +} + +func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { + if o.bw != nil { + return o.bw.WriteByte(c) + } + // _, err = o.w.Write([]byte{c}) + o.bs[0] = c + _, err = o.w.Write(o.bs[:]) + return +} + +func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { + if o.sw != nil { + return o.sw.WriteString(s) + } + // return o.w.Write([]byte(s)) + return o.w.Write(bytesView(s)) +} + +func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { + return o.w.Write(p) +} + +// ---------------------------------------- + +// ioEncWriter implements encWriter and can write to an io.Writer implementation +type ioEncWriter struct { + w ioEncWriterWriter + s simpleIoEncWriterWriter + // x [8]byte // temp byte array re-used internally for efficiency +} + +func (z *ioEncWriter) writeb(bs []byte) { + if len(bs) == 0 { + return + } + n, err := z.w.Write(bs) + if err != nil { + panic(err) + } + if n != len(bs) { + panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)) + } +} + +func (z *ioEncWriter) writestr(s string) { + n, err := z.w.WriteString(s) + if err != nil { + panic(err) + } + if n != len(s) { + panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.w.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1 byte, b2 byte) { + z.writen1(b1) + z.writen1(b2) +} + +func (z *ioEncWriter) atEndOfEncode() {} + +// ---------------------------------------- + +// bytesEncWriter implements encWriter and can write to an byte slice. +// It is used by Marshal function. +type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode +} + +func (z *bytesEncWriter) writeb(s []byte) { + if len(s) == 0 { + return + } + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) +} + +func (z *bytesEncWriter) writestr(s string) { + if len(s) == 0 { + return + } + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) +} + +func (z *bytesEncWriter) writen1(b1 byte) { + oc, a := z.growNoAlloc(1) + if a { + z.growAlloc(1, oc) + } + z.b[oc] = b1 +} + +func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { + oc, a := z.growNoAlloc(2) + if a { + z.growAlloc(2, oc) + } + z.b[oc+1] = b2 + z.b[oc] = b1 +} + +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] +} + +// have a growNoalloc(n int), which can be inlined. +// if allocation is needed, then call growAlloc(n int) + +func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) { + oldcursor = z.c + z.c = z.c + n + if z.c > len(z.b) { + if z.c > cap(z.b) { + allocNeeded = true + } else { + z.b = z.b[:cap(z.b)] + } + } + return +} + +func (z *bytesEncWriter) growAlloc(n int, oldcursor int) { + // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. 
+ // bytes.Buffer model (2*cap + n): much better + // bs := make([]byte, 2*cap(z.b)+n) + bs := make([]byte, growCap(cap(z.b), 1, n)) + copy(bs, z.b[:oldcursor]) + z.b = bs +} + +// --------------------------------------------- + +type encFnInfo struct { + e *Encoder + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType +} + +func (f *encFnInfo) builtin(rv reflect.Value) { + f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface()) +} + +func (f *encFnInfo) raw(rv reflect.Value) { + f.e.raw(rv.Interface().(Raw)) +} + +func (f *encFnInfo) rawExt(rv reflect.Value) { + // rev := rv.Interface().(RawExt) + // f.e.e.EncodeRawExt(&rev, f.e) + var re *RawExt + if rv.CanAddr() { + re = rv.Addr().Interface().(*RawExt) + } else { + rev := rv.Interface().(RawExt) + re = &rev + } + f.e.e.EncodeRawExt(re, f.e) +} + +func (f *encFnInfo) ext(rv reflect.Value) { + // if this is a struct|array and it was addressable, then pass the address directly (not the value) + if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { + rv = rv.Addr() + } + f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e) +} + +func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) { + if indir == 0 { + v = rv.Interface() + } else if indir == -1 { + // If a non-pointer was passed to Encode(), then that value is not addressable. + // Take addr if addressable, else copy value to an addressable value. + if rv.CanAddr() { + v = rv.Addr().Interface() + } else { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + // fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v) + } + } else { + for j := int8(0); j < indir; j++ { + if rv.IsNil() { + f.e.e.EncodeNil() + return + } + rv = rv.Elem() + } + v = rv.Interface() + } + return v, true +} + +func (f *encFnInfo) selferMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed { + v.(Selfer).CodecEncodeSelf(f.e) + } +} + +func (f *encFnInfo) binaryMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed { + bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary() + f.e.marshal(bs, fnerr, false, c_RAW) + } +} + +func (f *encFnInfo) textMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed { + // debugf(">>>> encoding.TextMarshaler: %T", rv.Interface()) + bs, fnerr := v.(encoding.TextMarshaler).MarshalText() + f.e.marshal(bs, fnerr, false, c_UTF8) + } +} + +func (f *encFnInfo) jsonMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed { + bs, fnerr := v.(jsonMarshaler).MarshalJSON() + f.e.marshal(bs, fnerr, true, c_UTF8) + } +} + +func (f *encFnInfo) kBool(rv reflect.Value) { + f.e.e.EncodeBool(rv.Bool()) +} + +func (f *encFnInfo) kString(rv reflect.Value) { + f.e.e.EncodeString(c_UTF8, rv.String()) +} + +func (f *encFnInfo) kFloat64(rv reflect.Value) { + f.e.e.EncodeFloat64(rv.Float()) +} + +func (f *encFnInfo) kFloat32(rv reflect.Value) { + f.e.e.EncodeFloat32(float32(rv.Float())) +} + +func (f *encFnInfo) kInt(rv reflect.Value) { + f.e.e.EncodeInt(rv.Int()) +} + +func (f *encFnInfo) kUint(rv reflect.Value) { + f.e.e.EncodeUint(rv.Uint()) +} + +func (f *encFnInfo) kInvalid(rv reflect.Value) { + f.e.e.EncodeNil() +} + +func (f *encFnInfo) kErr(rv reflect.Value) { + f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +} + +func (f *encFnInfo) kSlice(rv reflect.Value) { + ti := f.ti + // array 
may be non-addressable, so we have to manage with care + // (don't call rv.Bytes, rv.Slice, etc). + // E.g. type struct S{B [2]byte}; + // Encode(S{}) will bomb on "panic: slice of unaddressable array". + e := f.e + if f.seq != seqTypeArray { + if rv.IsNil() { + e.e.EncodeNil() + return + } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if ti.rtid == uint8SliceTypId { + e.e.EncodeStringBytes(c_RAW, rv.Bytes()) + return + } + } + cr := e.cr + rtelem := ti.rt.Elem() + l := rv.Len() + if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 { + switch f.seq { + case seqTypeArray: + // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else + if rv.CanAddr() { + e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) + } else { + var bs []byte + if l <= cap(e.b) { + bs = e.b[:l] + } else { + bs = make([]byte, l) + } + reflect.Copy(reflect.ValueOf(bs), rv) + // TODO: Test that reflect.Copy works instead of manual one-by-one + // for i := 0; i < l; i++ { + // bs[i] = byte(rv.Index(i).Uint()) + // } + e.e.EncodeStringBytes(c_RAW, bs) + } + case seqTypeSlice: + e.e.EncodeStringBytes(c_RAW, rv.Bytes()) + case seqTypeChan: + bs := e.b[:0] + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. + // for b := range rv.Interface().(<-chan byte) { + // bs = append(bs, b) + // } + ch := rv.Interface().(<-chan byte) + for i := 0; i < l; i++ { + bs = append(bs, <-ch) + } + e.e.EncodeStringBytes(c_RAW, bs) + } + return + } + + if ti.mbs { + if l%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", l) + return + } + e.e.EncodeMapStart(l / 2) + } else { + e.e.EncodeArrayStart(l) + } + + if l > 0 { + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. + var fn *encFn + if rtelem.Kind() != reflect.Interface { + rtelemid := reflect.ValueOf(rtelem).Pointer() + fn = e.getEncFn(rtelemid, rtelem, true, true) + } + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + for j := 0; j < l; j++ { + if cr != nil { + if ti.mbs { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } else { + cr.sendContainerState(containerArrayElem) + } + } + if f.seq == seqTypeChan { + if rv2, ok2 := rv.Recv(); ok2 { + e.encodeValue(rv2, fn) + } else { + e.encode(nil) // WE HAVE TO DO SOMETHING, so nil if nothing received. + } + } else { + e.encodeValue(rv.Index(j), fn) + } + } + } + + if cr != nil { + if ti.mbs { + cr.sendContainerState(containerMapEnd) + } else { + cr.sendContainerState(containerArrayEnd) + } + } +} + +func (f *encFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + e := f.e + cr := e.cr + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + newlen := len(fti.sfi) + + // Use sync.Pool to reduce allocating slices unnecessarily. + // The cost of sync.Pool is less than the cost of new allocation. + pool, poolv, fkvs := encStructPoolGet(newlen) + + // if toMap, use the sorted array. 
If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfi + } + newlen = 0 + var kv stringRv + recur := e.h.RecursiveEmptyCheck + for _, si := range tisfi { + kv.r = si.field(rv, false) + if toMap { + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { + continue + } + kv.v = si.encName + } else { + // use the zero value. + // if a reference or struct, set to nil (so you do not output too much) + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { + switch kv.r.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: + kv.r = reflect.Value{} //encode as nil + } + } + } + fkvs[newlen] = kv + newlen++ + } + + // debugf(">>>> kStruct: newlen: %v", newlen) + // sep := !e.be + ee := e.e //don't dereference every time + + if toMap { + ee.EncodeMapStart(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + for j := 0; j < newlen; j++ { + kv = fkvs[j] + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(kv.v) + } else { + ee.EncodeString(c_UTF8, kv.v) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(kv.r, nil) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + } else { + ee.EncodeArrayStart(newlen) + for j := 0; j < newlen; j++ { + kv = fkvs[j] + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + e.encodeValue(kv.r, nil) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + } + + // do not use defer. Instead, use explicit pool return at end of function. + // defer has a cost we are trying to avoid. + // If there is a panic and these slices are not returned, it is ok. + if pool != nil { + pool.Put(poolv) + } +} + +// func (f *encFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called") +// if rv.IsNil() { +// f.e.e.encodeNil() +// return +// } +// f.e.encodeValue(rv.Elem()) +// } + +// func (f *encFnInfo) kInterface(rv reflect.Value) { +// println("kInterface called") +// debug.PrintStack() +// if rv.IsNil() { +// f.e.e.EncodeNil() +// return +// } +// f.e.encodeValue(rv.Elem(), nil) +// } + +func (f *encFnInfo) kMap(rv reflect.Value) { + ee := f.e.e + if rv.IsNil() { + ee.EncodeNil() + return + } + + l := rv.Len() + ee.EncodeMapStart(l) + e := f.e + cr := e.cr + if l == 0 { + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return + } + var asSymbols bool + // determine the underlying key and val encFn's for the map. + // This eliminates some work which is done for each loop iteration i.e. + // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. + // + // However, if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
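+	// String keys are special-cased below: they can be written directly
+	// (optionally as symbols, per AsSymbols) without resolving an encFn.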
+ var keyFn, valFn *encFn + ti := f.ti + rtkey := ti.rt.Key() + rtval := ti.rt.Elem() + rtkeyid := reflect.ValueOf(rtkey).Pointer() + // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String + var keyTypeIsString = rtkeyid == stringTypId + if keyTypeIsString { + asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + rtkeyid = reflect.ValueOf(rtkey).Pointer() + keyFn = e.getEncFn(rtkeyid, rtkey, true, true) + } + } + for rtval.Kind() == reflect.Ptr { + rtval = rtval.Elem() + } + if rtval.Kind() != reflect.Interface { + rtvalid := reflect.ValueOf(rtval).Pointer() + valFn = e.getEncFn(rtvalid, rtval, true, true) + } + mks := rv.MapKeys() + // for j, lmks := 0, len(mks); j < lmks; j++ { + + if e.h.Canonical { + e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols) + } else { + for j := range mks { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if keyTypeIsString { + if asSymbols { + ee.EncodeSymbol(mks[j].String()) + } else { + ee.EncodeString(c_UTF8, mks[j].String()) + } + } else { + e.encodeValue(mks[j], keyFn) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mks[j]), valFn) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) { + ee := e.e + cr := e.cr + // we previously did out-of-band if an extension was registered. + // This is not necessary, as the natural kind is sufficient for ordering. + + if rtkeyid == uint8SliceTypId { + mksv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bytes() + } + sort.Sort(bytesRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeStringBytes(c_RAW, mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + } else { + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bool() + } + sort.Sort(boolRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(mksv[i].v) + } else { + ee.EncodeString(c_UTF8, mksv[i].v) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, 
len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(mksv[i].v)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + default: + // out-of-band + // first encode each key to a []byte first, then sort them, then record + var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + mksbv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + e2.MustEncode(k) + v.r = k + v.v = mksv[l:] + // fmt.Printf(">>>>> %s\n", mksv[l:]) + } + sort.Sort(bytesRvSlice(mksbv)) + for j := range mksbv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(mksbv[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksbv[j].r), valFn) + } + } + } +} + +// -------------------------------------------------- + +// encFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type encFn struct { + i encFnInfo + f func(*encFnInfo, reflect.Value) +} + +// -------------------------------------------------- + +type encRtidFn struct { + rtid uintptr + fn encFn +} + +// An Encoder writes an object to an output stream in the codec format. +type Encoder struct { + // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder + e encDriver + // NOTE: Encoder shouldn't call it's write methods, + // as the handler MAY need to do some coordination. + w encWriter + s []encRtidFn + ci set + be bool // is binary encoding + js bool // is json handle + + wi ioEncWriter + wb bytesEncWriter + + h *BasicHandle + hh Handle + + cr containerStateRecv + as encDriverAsis + + f map[uintptr]*encFn + b [scratchByteArrayLen]byte +} + +// NewEncoder returns an Encoder for encoding into an io.Writer. +// +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Writer, bytes.Buffer). +func NewEncoder(w io.Writer, h Handle) *Encoder { + e := newEncoder(h) + e.Reset(w) + return e +} + +// NewEncoderBytes returns an encoder for encoding directly and efficiently +// into a byte slice, using zero-copying to temporary slices. +// +// It will potentially replace the output byte slice pointed to. +// After encoding, the out parameter contains the encoded contents. 
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder { + e := newEncoder(h) + e.ResetBytes(out) + return e +} + +func newEncoder(h Handle) *Encoder { + e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} + _, e.js = h.(*JsonHandle) + e.e = h.newEncDriver(e) + e.as, _ = e.e.(encDriverAsis) + e.cr, _ = e.e.(containerStateRecv) + return e +} + +// Reset the Encoder with a new output stream. +// +// This accommodates using the state of the Encoder, +// where it has "cached" information about sub-engines. +func (e *Encoder) Reset(w io.Writer) { + ww, ok := w.(ioEncWriterWriter) + if ok { + e.wi.w = ww + } else { + sww := &e.wi.s + sww.w = w + sww.bw, _ = w.(io.ByteWriter) + sww.sw, _ = w.(ioEncStringWriter) + e.wi.w = sww + //ww = bufio.NewWriterSize(w, defEncByteBufSize) + } + e.w = &e.wi + e.e.reset() +} + +func (e *Encoder) ResetBytes(out *[]byte) { + in := *out + if in == nil { + in = make([]byte, defEncByteBufSize) + } + e.wb.b, e.wb.out, e.wb.c = in, out, 0 + e.w = &e.wb + e.e.reset() +} + +// func (e *Encoder) sendContainerState(c containerState) { +// if e.cr != nil { +// e.cr.sendContainerState(c) +// } +// } + +// Encode writes an object into a stream. +// +// Encoding can be configured via the struct tag for the fields. +// The "codec" key in struct field's tag value is the key name, +// followed by an optional comma and options. +// Note that the "json" key is used in the absence of the "codec" key. +// +// To set an option on all fields (e.g. omitempty on all fields), you +// can create a field called _struct, and set flags on it. +// +// Struct values "usually" encode as maps. Each exported struct field is encoded unless: +// - the field's tag is "-", OR +// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option. +// +// When encoding as a map, the first string in the tag (before the comma) +// is the map key string to use when encoding. +// +// However, struct values may encode as arrays. This happens when: +// - StructToArray Encode option is set, OR +// - the tag on the _struct field sets the "toarray" option +// +// Values with types that implement MapBySlice are encoded as stream maps. +// +// The empty values (for omitempty option) are false, 0, any nil pointer +// or interface value, and any array, slice, map, or string of length zero. +// +// Anonymous fields are encoded inline except: +// - the struct tag specifies a replacement name (first value) +// - the field is of an interface type +// +// Examples: +// +// // NOTE: 'json:' can be used as struct tag key, in place 'codec:' below. +// type MyStruct struct { +// _struct bool `codec:",omitempty"` //set omitempty for every field +// Field1 string `codec:"-"` //skip this field +// Field2 int `codec:"myName"` //Use key "myName" in encode stream +// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. +// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. +// io.Reader //use key "Reader". +// MyStruct `codec:"my1" //use key "my1". +// MyStruct //inline it +// ... +// } +// +// type MyStruct struct { +// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field +// //and encode struct as an array +// } +// +// The mode of encoding is based on the type of the value. 
When a value is seen: +// - If a Selfer, call its CodecEncodeSelf method +// - If an extension is registered for it, call that extension function +// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method +// - Else encode it based on its reflect.Kind +// +// Note that struct field names and keys in map[string]XXX will be treated as symbols. +// Some formats support symbols (e.g. binc) and will properly encode the string +// only once in the stream, and use a tag to refer to it thereafter. +func (e *Encoder) Encode(v interface{}) (err error) { + defer panicToErr(&err) + e.encode(v) + e.w.atEndOfEncode() + return +} + +// MustEncode is like Encode, but panics if unable to Encode. +// This provides insight to the code location that triggered the error. +func (e *Encoder) MustEncode(v interface{}) { + e.encode(v) + e.w.atEndOfEncode() +} + +func (e *Encoder) encode(iv interface{}) { + // if ics, ok := iv.(Selfer); ok { + // ics.CodecEncodeSelf(e) + // return + // } + + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + case Selfer: + v.CodecEncodeSelf(e) + case Raw: + e.raw(v) + case reflect.Value: + e.encodeValue(v, nil) + + case string: + e.e.EncodeString(c_UTF8, v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + + case []uint8: + e.e.EncodeStringBytes(c_RAW, v) + + case *string: + e.e.EncodeString(c_UTF8, *v) + case *bool: + e.e.EncodeBool(*v) + case *int: + e.e.EncodeInt(int64(*v)) + case *int8: + e.e.EncodeInt(int64(*v)) + case *int16: + e.e.EncodeInt(int64(*v)) + case *int32: + e.e.EncodeInt(int64(*v)) + case *int64: + e.e.EncodeInt(*v) + case *uint: + e.e.EncodeUint(uint64(*v)) + case *uint8: + e.e.EncodeUint(uint64(*v)) + case *uint16: + e.e.EncodeUint(uint64(*v)) + case *uint32: + e.e.EncodeUint(uint64(*v)) + case *uint64: + e.e.EncodeUint(*v) + case *float32: + e.e.EncodeFloat32(*v) + case *float64: + e.e.EncodeFloat64(*v) + + case *[]uint8: + e.e.EncodeStringBytes(c_RAW, *v) + + default: + const checkCodecSelfer1 = true // in case T is passed, where *T is a Selfer, still checkCodecSelfer + if !fastpathEncodeTypeSwitch(iv, e) { + e.encodeI(iv, false, checkCodecSelfer1) + } + } +} + +func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) { + // use a goto statement instead of a recursive function for ptr/interface. +TOP: + switch rv.Kind() { + case reflect.Ptr: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
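+			// Record the struct's address so doEncodeValue can detect a cycle
+			// if this same struct is reached again during encoding.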
+ sptr = rv.UnsafeAddr() + break TOP + } + goto TOP + case reflect.Interface: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + goto TOP + case reflect.Slice, reflect.Map: + if rv.IsNil() { + e.e.EncodeNil() + return + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + return + } + + proceed = true + rv2 = rv + return +} + +func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr, + checkFastpath, checkCodecSelfer bool) { + if sptr != 0 { + if (&e.ci).add(sptr) { + e.errorf("circular reference found: # %d", sptr) + } + } + if fn == nil { + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + // fn = e.getEncFn(rtid, rt, true, true) + fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) + } + fn.f(&fn.i, rv) + if sptr != 0 { + (&e.ci).remove(sptr) + } +} + +func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { + if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { + e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer) + } +} + +func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + if rv, sptr, proceed := e.preEncodeValue(rv); proceed { + e.doEncodeValue(rv, fn, sptr, true, true) + } +} + +func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) { + // rtid := reflect.ValueOf(rt).Pointer() + var ok bool + if useMapForCodecCache { + fn, ok = e.f[rtid] + } else { + for i := range e.s { + v := &(e.s[i]) + if v.rtid == rtid { + fn, ok = &(v.fn), true + break + } + } + } + if ok { + return + } + + if useMapForCodecCache { + if e.f == nil { + e.f = make(map[uintptr]*encFn, initCollectionCap) + } + fn = new(encFn) + e.f[rtid] = fn + } else { + if e.s == nil { + e.s = make([]encRtidFn, 0, initCollectionCap) + } + e.s = append(e.s, encRtidFn{rtid: rtid}) + fn = &(e.s[len(e.s)-1]).fn + } + + ti := e.h.getTypeInfo(rtid, rt) + fi := &(fn.i) + fi.e = e + fi.ti = ti + + if checkCodecSelfer && ti.cs { + fn.f = (*encFnInfo).selferMarshal + } else if rtid == rawTypId { + fn.f = (*encFnInfo).raw + } else if rtid == rawExtTypId { + fn.f = (*encFnInfo).rawExt + } else if e.e.IsBuiltinType(rtid) { + fn.f = (*encFnInfo).builtin + } else if xfFn := e.h.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.f = (*encFnInfo).ext + } else if supportMarshalInterfaces && e.be && ti.bm { + fn.f = (*encFnInfo).binaryMarshal + } else if supportMarshalInterfaces && !e.be && e.js && ti.jm { + //If JSON, we should check JSONMarshal before textMarshal + fn.f = (*encFnInfo).jsonMarshal + } else if supportMarshalInterfaces && !e.be && ti.tm { + fn.f = (*encFnInfo).textMarshal + } else { + rk := rt.Kind() + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if rt.PkgPath() == "" { // un-named slice or map + if idx := fastpathAV.index(rtid); idx != -1 { + fn.f = fastpathAV[idx].encfn + } + } else { + ok = false + // use mapping for underlying type if there + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(rt.Key(), rt.Elem()) + } else { + rtu = reflect.SliceOf(rt.Elem()) + } + rtuid := reflect.ValueOf(rtu).Pointer() + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.f = func(xf *encFnInfo, xrv reflect.Value) { + xfnf(xf, xrv.Convert(xrt)) + } + } + } + } + if fn.f == nil { + switch rk { + case reflect.Bool: + fn.f = (*encFnInfo).kBool + case 
reflect.String: + fn.f = (*encFnInfo).kString + case reflect.Float64: + fn.f = (*encFnInfo).kFloat64 + case reflect.Float32: + fn.f = (*encFnInfo).kFloat32 + case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: + fn.f = (*encFnInfo).kInt + case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uintptr: + fn.f = (*encFnInfo).kUint + case reflect.Invalid: + fn.f = (*encFnInfo).kInvalid + case reflect.Chan: + fi.seq = seqTypeChan + fn.f = (*encFnInfo).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.f = (*encFnInfo).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.f = (*encFnInfo).kSlice + case reflect.Struct: + fn.f = (*encFnInfo).kStruct + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + // case reflect.Ptr: + // fn.f = (*encFnInfo).kPtr + // case reflect.Interface: + // fn.f = (*encFnInfo).kInterface + case reflect.Map: + fn.f = (*encFnInfo).kMap + default: + fn.f = (*encFnInfo).kErr + } + } + } + + return +} + +func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else if asis { + e.asis(bs) + } else { + e.e.EncodeStringBytes(c, bs) + } +} + +func (e *Encoder) asis(v []byte) { + if e.as == nil { + e.w.writeb(v) + } else { + e.as.EncodeAsis(v) + } +} + +func (e *Encoder) raw(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + e.errorf("Raw values cannot be encoded: %v", v) + } + if e.as == nil { + e.w.writeb(v) + } else { + e.as.EncodeAsis(v) + } +} + +func (e *Encoder) errorf(format string, params ...interface{}) { + err := fmt.Errorf(format, params...) + panic(err) +} + +// ---------------------------------------- + +const encStructPoolLen = 5 + +// encStructPool is an array of sync.Pool. +// Each element of the array pools one of encStructPool(8|16|32|64). +// It allows the re-use of slices up to 64 in length. +// A performance cost of encoding structs was collecting +// which values were empty and should be omitted. +// We needed slices of reflect.Value and string to collect them. +// This shared pool reduces the amount of unnecessary creation we do. +// The cost is that of locking sometimes, but sync.Pool is efficient +// enough to reduce thread contention. +var encStructPool [encStructPoolLen]sync.Pool + +func init() { + encStructPool[0].New = func() interface{} { return new([8]stringRv) } + encStructPool[1].New = func() interface{} { return new([16]stringRv) } + encStructPool[2].New = func() interface{} { return new([32]stringRv) } + encStructPool[3].New = func() interface{} { return new([64]stringRv) } + encStructPool[4].New = func() interface{} { return new([128]stringRv) } +} + +func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) { + // if encStructPoolLen != 5 { // constant chec, so removed at build time. 
+ // panic(errors.New("encStructPoolLen must be equal to 4")) // defensive, in case it is changed + // } + // idxpool := newlen / 8 + if newlen <= 8 { + p = &encStructPool[0] + v = p.Get() + s = v.(*[8]stringRv)[:newlen] + } else if newlen <= 16 { + p = &encStructPool[1] + v = p.Get() + s = v.(*[16]stringRv)[:newlen] + } else if newlen <= 32 { + p = &encStructPool[2] + v = p.Get() + s = v.(*[32]stringRv)[:newlen] + } else if newlen <= 64 { + p = &encStructPool[3] + v = p.Get() + s = v.(*[64]stringRv)[:newlen] + } else if newlen <= 128 { + p = &encStructPool[4] + v = p.Get() + s = v.(*[128]stringRv)[:newlen] + } else { + s = make([]stringRv, newlen) + } + return +} + +// ---------------------------------------- + +// func encErr(format string, params ...interface{}) { +// doPanic(msgTagEnc, format, params...) +// } diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go new file mode 100644 index 000000000..f2e5d2dcf --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go @@ -0,0 +1,39352 @@ +// +build !notfastpath + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl +// ************************************************************ + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register then in this single file +// so as not to pollute the encode.go and decode.go, and create a dependency in there. +// This file can be omitted without causing a build failure. +// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently support +// - slice of all builtin types, +// - map of all builtin types to string or interface value +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// This should provide adequate "typical" implementations. +// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
+// + +import ( + "reflect" + "sort" +) + +const fastpathEnabled = true + +const fastpathCheckNilFalse = false // for reflect +const fastpathCheckNilTrue = true // for type switch + +type fastpathT struct{} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*encFnInfo, reflect.Value) + decfn func(*decFnInfo, reflect.Value) +} + +type fastpathA [271]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + h, i, j := 0, 0, 271 // len(x) + for i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < 271 && x[i].rtid == rtid { + return i + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } +func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + i := 0 + fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { + xrt := reflect.TypeOf(v) + xptr := reflect.ValueOf(xrt).Pointer() + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + return + } + + fn([]interface{}(nil), (*encFnInfo).fastpathEncSliceIntfR, (*decFnInfo).fastpathDecSliceIntfR) + fn([]string(nil), (*encFnInfo).fastpathEncSliceStringR, (*decFnInfo).fastpathDecSliceStringR) + fn([]float32(nil), (*encFnInfo).fastpathEncSliceFloat32R, (*decFnInfo).fastpathDecSliceFloat32R) + fn([]float64(nil), (*encFnInfo).fastpathEncSliceFloat64R, (*decFnInfo).fastpathDecSliceFloat64R) + fn([]uint(nil), (*encFnInfo).fastpathEncSliceUintR, (*decFnInfo).fastpathDecSliceUintR) + fn([]uint16(nil), (*encFnInfo).fastpathEncSliceUint16R, (*decFnInfo).fastpathDecSliceUint16R) + fn([]uint32(nil), (*encFnInfo).fastpathEncSliceUint32R, (*decFnInfo).fastpathDecSliceUint32R) + fn([]uint64(nil), (*encFnInfo).fastpathEncSliceUint64R, (*decFnInfo).fastpathDecSliceUint64R) + fn([]uintptr(nil), (*encFnInfo).fastpathEncSliceUintptrR, (*decFnInfo).fastpathDecSliceUintptrR) + fn([]int(nil), (*encFnInfo).fastpathEncSliceIntR, (*decFnInfo).fastpathDecSliceIntR) + fn([]int8(nil), (*encFnInfo).fastpathEncSliceInt8R, (*decFnInfo).fastpathDecSliceInt8R) + fn([]int16(nil), (*encFnInfo).fastpathEncSliceInt16R, (*decFnInfo).fastpathDecSliceInt16R) + fn([]int32(nil), (*encFnInfo).fastpathEncSliceInt32R, (*decFnInfo).fastpathDecSliceInt32R) + fn([]int64(nil), (*encFnInfo).fastpathEncSliceInt64R, (*decFnInfo).fastpathDecSliceInt64R) + fn([]bool(nil), (*encFnInfo).fastpathEncSliceBoolR, (*decFnInfo).fastpathDecSliceBoolR) + + fn(map[interface{}]interface{}(nil), (*encFnInfo).fastpathEncMapIntfIntfR, (*decFnInfo).fastpathDecMapIntfIntfR) + fn(map[interface{}]string(nil), (*encFnInfo).fastpathEncMapIntfStringR, (*decFnInfo).fastpathDecMapIntfStringR) + fn(map[interface{}]uint(nil), (*encFnInfo).fastpathEncMapIntfUintR, (*decFnInfo).fastpathDecMapIntfUintR) + fn(map[interface{}]uint8(nil), (*encFnInfo).fastpathEncMapIntfUint8R, (*decFnInfo).fastpathDecMapIntfUint8R) + fn(map[interface{}]uint16(nil), (*encFnInfo).fastpathEncMapIntfUint16R, (*decFnInfo).fastpathDecMapIntfUint16R) + fn(map[interface{}]uint32(nil), (*encFnInfo).fastpathEncMapIntfUint32R, (*decFnInfo).fastpathDecMapIntfUint32R) + fn(map[interface{}]uint64(nil), (*encFnInfo).fastpathEncMapIntfUint64R, (*decFnInfo).fastpathDecMapIntfUint64R) + 
fn(map[interface{}]uintptr(nil), (*encFnInfo).fastpathEncMapIntfUintptrR, (*decFnInfo).fastpathDecMapIntfUintptrR) + fn(map[interface{}]int(nil), (*encFnInfo).fastpathEncMapIntfIntR, (*decFnInfo).fastpathDecMapIntfIntR) + fn(map[interface{}]int8(nil), (*encFnInfo).fastpathEncMapIntfInt8R, (*decFnInfo).fastpathDecMapIntfInt8R) + fn(map[interface{}]int16(nil), (*encFnInfo).fastpathEncMapIntfInt16R, (*decFnInfo).fastpathDecMapIntfInt16R) + fn(map[interface{}]int32(nil), (*encFnInfo).fastpathEncMapIntfInt32R, (*decFnInfo).fastpathDecMapIntfInt32R) + fn(map[interface{}]int64(nil), (*encFnInfo).fastpathEncMapIntfInt64R, (*decFnInfo).fastpathDecMapIntfInt64R) + fn(map[interface{}]float32(nil), (*encFnInfo).fastpathEncMapIntfFloat32R, (*decFnInfo).fastpathDecMapIntfFloat32R) + fn(map[interface{}]float64(nil), (*encFnInfo).fastpathEncMapIntfFloat64R, (*decFnInfo).fastpathDecMapIntfFloat64R) + fn(map[interface{}]bool(nil), (*encFnInfo).fastpathEncMapIntfBoolR, (*decFnInfo).fastpathDecMapIntfBoolR) + fn(map[string]interface{}(nil), (*encFnInfo).fastpathEncMapStringIntfR, (*decFnInfo).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*encFnInfo).fastpathEncMapStringStringR, (*decFnInfo).fastpathDecMapStringStringR) + fn(map[string]uint(nil), (*encFnInfo).fastpathEncMapStringUintR, (*decFnInfo).fastpathDecMapStringUintR) + fn(map[string]uint8(nil), (*encFnInfo).fastpathEncMapStringUint8R, (*decFnInfo).fastpathDecMapStringUint8R) + fn(map[string]uint16(nil), (*encFnInfo).fastpathEncMapStringUint16R, (*decFnInfo).fastpathDecMapStringUint16R) + fn(map[string]uint32(nil), (*encFnInfo).fastpathEncMapStringUint32R, (*decFnInfo).fastpathDecMapStringUint32R) + fn(map[string]uint64(nil), (*encFnInfo).fastpathEncMapStringUint64R, (*decFnInfo).fastpathDecMapStringUint64R) + fn(map[string]uintptr(nil), (*encFnInfo).fastpathEncMapStringUintptrR, (*decFnInfo).fastpathDecMapStringUintptrR) + fn(map[string]int(nil), (*encFnInfo).fastpathEncMapStringIntR, (*decFnInfo).fastpathDecMapStringIntR) + fn(map[string]int8(nil), (*encFnInfo).fastpathEncMapStringInt8R, (*decFnInfo).fastpathDecMapStringInt8R) + fn(map[string]int16(nil), (*encFnInfo).fastpathEncMapStringInt16R, (*decFnInfo).fastpathDecMapStringInt16R) + fn(map[string]int32(nil), (*encFnInfo).fastpathEncMapStringInt32R, (*decFnInfo).fastpathDecMapStringInt32R) + fn(map[string]int64(nil), (*encFnInfo).fastpathEncMapStringInt64R, (*decFnInfo).fastpathDecMapStringInt64R) + fn(map[string]float32(nil), (*encFnInfo).fastpathEncMapStringFloat32R, (*decFnInfo).fastpathDecMapStringFloat32R) + fn(map[string]float64(nil), (*encFnInfo).fastpathEncMapStringFloat64R, (*decFnInfo).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*encFnInfo).fastpathEncMapStringBoolR, (*decFnInfo).fastpathDecMapStringBoolR) + fn(map[float32]interface{}(nil), (*encFnInfo).fastpathEncMapFloat32IntfR, (*decFnInfo).fastpathDecMapFloat32IntfR) + fn(map[float32]string(nil), (*encFnInfo).fastpathEncMapFloat32StringR, (*decFnInfo).fastpathDecMapFloat32StringR) + fn(map[float32]uint(nil), (*encFnInfo).fastpathEncMapFloat32UintR, (*decFnInfo).fastpathDecMapFloat32UintR) + fn(map[float32]uint8(nil), (*encFnInfo).fastpathEncMapFloat32Uint8R, (*decFnInfo).fastpathDecMapFloat32Uint8R) + fn(map[float32]uint16(nil), (*encFnInfo).fastpathEncMapFloat32Uint16R, (*decFnInfo).fastpathDecMapFloat32Uint16R) + fn(map[float32]uint32(nil), (*encFnInfo).fastpathEncMapFloat32Uint32R, (*decFnInfo).fastpathDecMapFloat32Uint32R) + fn(map[float32]uint64(nil), (*encFnInfo).fastpathEncMapFloat32Uint64R, 
(*decFnInfo).fastpathDecMapFloat32Uint64R) + fn(map[float32]uintptr(nil), (*encFnInfo).fastpathEncMapFloat32UintptrR, (*decFnInfo).fastpathDecMapFloat32UintptrR) + fn(map[float32]int(nil), (*encFnInfo).fastpathEncMapFloat32IntR, (*decFnInfo).fastpathDecMapFloat32IntR) + fn(map[float32]int8(nil), (*encFnInfo).fastpathEncMapFloat32Int8R, (*decFnInfo).fastpathDecMapFloat32Int8R) + fn(map[float32]int16(nil), (*encFnInfo).fastpathEncMapFloat32Int16R, (*decFnInfo).fastpathDecMapFloat32Int16R) + fn(map[float32]int32(nil), (*encFnInfo).fastpathEncMapFloat32Int32R, (*decFnInfo).fastpathDecMapFloat32Int32R) + fn(map[float32]int64(nil), (*encFnInfo).fastpathEncMapFloat32Int64R, (*decFnInfo).fastpathDecMapFloat32Int64R) + fn(map[float32]float32(nil), (*encFnInfo).fastpathEncMapFloat32Float32R, (*decFnInfo).fastpathDecMapFloat32Float32R) + fn(map[float32]float64(nil), (*encFnInfo).fastpathEncMapFloat32Float64R, (*decFnInfo).fastpathDecMapFloat32Float64R) + fn(map[float32]bool(nil), (*encFnInfo).fastpathEncMapFloat32BoolR, (*decFnInfo).fastpathDecMapFloat32BoolR) + fn(map[float64]interface{}(nil), (*encFnInfo).fastpathEncMapFloat64IntfR, (*decFnInfo).fastpathDecMapFloat64IntfR) + fn(map[float64]string(nil), (*encFnInfo).fastpathEncMapFloat64StringR, (*decFnInfo).fastpathDecMapFloat64StringR) + fn(map[float64]uint(nil), (*encFnInfo).fastpathEncMapFloat64UintR, (*decFnInfo).fastpathDecMapFloat64UintR) + fn(map[float64]uint8(nil), (*encFnInfo).fastpathEncMapFloat64Uint8R, (*decFnInfo).fastpathDecMapFloat64Uint8R) + fn(map[float64]uint16(nil), (*encFnInfo).fastpathEncMapFloat64Uint16R, (*decFnInfo).fastpathDecMapFloat64Uint16R) + fn(map[float64]uint32(nil), (*encFnInfo).fastpathEncMapFloat64Uint32R, (*decFnInfo).fastpathDecMapFloat64Uint32R) + fn(map[float64]uint64(nil), (*encFnInfo).fastpathEncMapFloat64Uint64R, (*decFnInfo).fastpathDecMapFloat64Uint64R) + fn(map[float64]uintptr(nil), (*encFnInfo).fastpathEncMapFloat64UintptrR, (*decFnInfo).fastpathDecMapFloat64UintptrR) + fn(map[float64]int(nil), (*encFnInfo).fastpathEncMapFloat64IntR, (*decFnInfo).fastpathDecMapFloat64IntR) + fn(map[float64]int8(nil), (*encFnInfo).fastpathEncMapFloat64Int8R, (*decFnInfo).fastpathDecMapFloat64Int8R) + fn(map[float64]int16(nil), (*encFnInfo).fastpathEncMapFloat64Int16R, (*decFnInfo).fastpathDecMapFloat64Int16R) + fn(map[float64]int32(nil), (*encFnInfo).fastpathEncMapFloat64Int32R, (*decFnInfo).fastpathDecMapFloat64Int32R) + fn(map[float64]int64(nil), (*encFnInfo).fastpathEncMapFloat64Int64R, (*decFnInfo).fastpathDecMapFloat64Int64R) + fn(map[float64]float32(nil), (*encFnInfo).fastpathEncMapFloat64Float32R, (*decFnInfo).fastpathDecMapFloat64Float32R) + fn(map[float64]float64(nil), (*encFnInfo).fastpathEncMapFloat64Float64R, (*decFnInfo).fastpathDecMapFloat64Float64R) + fn(map[float64]bool(nil), (*encFnInfo).fastpathEncMapFloat64BoolR, (*decFnInfo).fastpathDecMapFloat64BoolR) + fn(map[uint]interface{}(nil), (*encFnInfo).fastpathEncMapUintIntfR, (*decFnInfo).fastpathDecMapUintIntfR) + fn(map[uint]string(nil), (*encFnInfo).fastpathEncMapUintStringR, (*decFnInfo).fastpathDecMapUintStringR) + fn(map[uint]uint(nil), (*encFnInfo).fastpathEncMapUintUintR, (*decFnInfo).fastpathDecMapUintUintR) + fn(map[uint]uint8(nil), (*encFnInfo).fastpathEncMapUintUint8R, (*decFnInfo).fastpathDecMapUintUint8R) + fn(map[uint]uint16(nil), (*encFnInfo).fastpathEncMapUintUint16R, (*decFnInfo).fastpathDecMapUintUint16R) + fn(map[uint]uint32(nil), (*encFnInfo).fastpathEncMapUintUint32R, (*decFnInfo).fastpathDecMapUintUint32R) + fn(map[uint]uint64(nil), 
(*encFnInfo).fastpathEncMapUintUint64R, (*decFnInfo).fastpathDecMapUintUint64R) + fn(map[uint]uintptr(nil), (*encFnInfo).fastpathEncMapUintUintptrR, (*decFnInfo).fastpathDecMapUintUintptrR) + fn(map[uint]int(nil), (*encFnInfo).fastpathEncMapUintIntR, (*decFnInfo).fastpathDecMapUintIntR) + fn(map[uint]int8(nil), (*encFnInfo).fastpathEncMapUintInt8R, (*decFnInfo).fastpathDecMapUintInt8R) + fn(map[uint]int16(nil), (*encFnInfo).fastpathEncMapUintInt16R, (*decFnInfo).fastpathDecMapUintInt16R) + fn(map[uint]int32(nil), (*encFnInfo).fastpathEncMapUintInt32R, (*decFnInfo).fastpathDecMapUintInt32R) + fn(map[uint]int64(nil), (*encFnInfo).fastpathEncMapUintInt64R, (*decFnInfo).fastpathDecMapUintInt64R) + fn(map[uint]float32(nil), (*encFnInfo).fastpathEncMapUintFloat32R, (*decFnInfo).fastpathDecMapUintFloat32R) + fn(map[uint]float64(nil), (*encFnInfo).fastpathEncMapUintFloat64R, (*decFnInfo).fastpathDecMapUintFloat64R) + fn(map[uint]bool(nil), (*encFnInfo).fastpathEncMapUintBoolR, (*decFnInfo).fastpathDecMapUintBoolR) + fn(map[uint8]interface{}(nil), (*encFnInfo).fastpathEncMapUint8IntfR, (*decFnInfo).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*encFnInfo).fastpathEncMapUint8StringR, (*decFnInfo).fastpathDecMapUint8StringR) + fn(map[uint8]uint(nil), (*encFnInfo).fastpathEncMapUint8UintR, (*decFnInfo).fastpathDecMapUint8UintR) + fn(map[uint8]uint8(nil), (*encFnInfo).fastpathEncMapUint8Uint8R, (*decFnInfo).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint16(nil), (*encFnInfo).fastpathEncMapUint8Uint16R, (*decFnInfo).fastpathDecMapUint8Uint16R) + fn(map[uint8]uint32(nil), (*encFnInfo).fastpathEncMapUint8Uint32R, (*decFnInfo).fastpathDecMapUint8Uint32R) + fn(map[uint8]uint64(nil), (*encFnInfo).fastpathEncMapUint8Uint64R, (*decFnInfo).fastpathDecMapUint8Uint64R) + fn(map[uint8]uintptr(nil), (*encFnInfo).fastpathEncMapUint8UintptrR, (*decFnInfo).fastpathDecMapUint8UintptrR) + fn(map[uint8]int(nil), (*encFnInfo).fastpathEncMapUint8IntR, (*decFnInfo).fastpathDecMapUint8IntR) + fn(map[uint8]int8(nil), (*encFnInfo).fastpathEncMapUint8Int8R, (*decFnInfo).fastpathDecMapUint8Int8R) + fn(map[uint8]int16(nil), (*encFnInfo).fastpathEncMapUint8Int16R, (*decFnInfo).fastpathDecMapUint8Int16R) + fn(map[uint8]int32(nil), (*encFnInfo).fastpathEncMapUint8Int32R, (*decFnInfo).fastpathDecMapUint8Int32R) + fn(map[uint8]int64(nil), (*encFnInfo).fastpathEncMapUint8Int64R, (*decFnInfo).fastpathDecMapUint8Int64R) + fn(map[uint8]float32(nil), (*encFnInfo).fastpathEncMapUint8Float32R, (*decFnInfo).fastpathDecMapUint8Float32R) + fn(map[uint8]float64(nil), (*encFnInfo).fastpathEncMapUint8Float64R, (*decFnInfo).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*encFnInfo).fastpathEncMapUint8BoolR, (*decFnInfo).fastpathDecMapUint8BoolR) + fn(map[uint16]interface{}(nil), (*encFnInfo).fastpathEncMapUint16IntfR, (*decFnInfo).fastpathDecMapUint16IntfR) + fn(map[uint16]string(nil), (*encFnInfo).fastpathEncMapUint16StringR, (*decFnInfo).fastpathDecMapUint16StringR) + fn(map[uint16]uint(nil), (*encFnInfo).fastpathEncMapUint16UintR, (*decFnInfo).fastpathDecMapUint16UintR) + fn(map[uint16]uint8(nil), (*encFnInfo).fastpathEncMapUint16Uint8R, (*decFnInfo).fastpathDecMapUint16Uint8R) + fn(map[uint16]uint16(nil), (*encFnInfo).fastpathEncMapUint16Uint16R, (*decFnInfo).fastpathDecMapUint16Uint16R) + fn(map[uint16]uint32(nil), (*encFnInfo).fastpathEncMapUint16Uint32R, (*decFnInfo).fastpathDecMapUint16Uint32R) + fn(map[uint16]uint64(nil), (*encFnInfo).fastpathEncMapUint16Uint64R, (*decFnInfo).fastpathDecMapUint16Uint64R) + 
fn(map[uint16]uintptr(nil), (*encFnInfo).fastpathEncMapUint16UintptrR, (*decFnInfo).fastpathDecMapUint16UintptrR) + fn(map[uint16]int(nil), (*encFnInfo).fastpathEncMapUint16IntR, (*decFnInfo).fastpathDecMapUint16IntR) + fn(map[uint16]int8(nil), (*encFnInfo).fastpathEncMapUint16Int8R, (*decFnInfo).fastpathDecMapUint16Int8R) + fn(map[uint16]int16(nil), (*encFnInfo).fastpathEncMapUint16Int16R, (*decFnInfo).fastpathDecMapUint16Int16R) + fn(map[uint16]int32(nil), (*encFnInfo).fastpathEncMapUint16Int32R, (*decFnInfo).fastpathDecMapUint16Int32R) + fn(map[uint16]int64(nil), (*encFnInfo).fastpathEncMapUint16Int64R, (*decFnInfo).fastpathDecMapUint16Int64R) + fn(map[uint16]float32(nil), (*encFnInfo).fastpathEncMapUint16Float32R, (*decFnInfo).fastpathDecMapUint16Float32R) + fn(map[uint16]float64(nil), (*encFnInfo).fastpathEncMapUint16Float64R, (*decFnInfo).fastpathDecMapUint16Float64R) + fn(map[uint16]bool(nil), (*encFnInfo).fastpathEncMapUint16BoolR, (*decFnInfo).fastpathDecMapUint16BoolR) + fn(map[uint32]interface{}(nil), (*encFnInfo).fastpathEncMapUint32IntfR, (*decFnInfo).fastpathDecMapUint32IntfR) + fn(map[uint32]string(nil), (*encFnInfo).fastpathEncMapUint32StringR, (*decFnInfo).fastpathDecMapUint32StringR) + fn(map[uint32]uint(nil), (*encFnInfo).fastpathEncMapUint32UintR, (*decFnInfo).fastpathDecMapUint32UintR) + fn(map[uint32]uint8(nil), (*encFnInfo).fastpathEncMapUint32Uint8R, (*decFnInfo).fastpathDecMapUint32Uint8R) + fn(map[uint32]uint16(nil), (*encFnInfo).fastpathEncMapUint32Uint16R, (*decFnInfo).fastpathDecMapUint32Uint16R) + fn(map[uint32]uint32(nil), (*encFnInfo).fastpathEncMapUint32Uint32R, (*decFnInfo).fastpathDecMapUint32Uint32R) + fn(map[uint32]uint64(nil), (*encFnInfo).fastpathEncMapUint32Uint64R, (*decFnInfo).fastpathDecMapUint32Uint64R) + fn(map[uint32]uintptr(nil), (*encFnInfo).fastpathEncMapUint32UintptrR, (*decFnInfo).fastpathDecMapUint32UintptrR) + fn(map[uint32]int(nil), (*encFnInfo).fastpathEncMapUint32IntR, (*decFnInfo).fastpathDecMapUint32IntR) + fn(map[uint32]int8(nil), (*encFnInfo).fastpathEncMapUint32Int8R, (*decFnInfo).fastpathDecMapUint32Int8R) + fn(map[uint32]int16(nil), (*encFnInfo).fastpathEncMapUint32Int16R, (*decFnInfo).fastpathDecMapUint32Int16R) + fn(map[uint32]int32(nil), (*encFnInfo).fastpathEncMapUint32Int32R, (*decFnInfo).fastpathDecMapUint32Int32R) + fn(map[uint32]int64(nil), (*encFnInfo).fastpathEncMapUint32Int64R, (*decFnInfo).fastpathDecMapUint32Int64R) + fn(map[uint32]float32(nil), (*encFnInfo).fastpathEncMapUint32Float32R, (*decFnInfo).fastpathDecMapUint32Float32R) + fn(map[uint32]float64(nil), (*encFnInfo).fastpathEncMapUint32Float64R, (*decFnInfo).fastpathDecMapUint32Float64R) + fn(map[uint32]bool(nil), (*encFnInfo).fastpathEncMapUint32BoolR, (*decFnInfo).fastpathDecMapUint32BoolR) + fn(map[uint64]interface{}(nil), (*encFnInfo).fastpathEncMapUint64IntfR, (*decFnInfo).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*encFnInfo).fastpathEncMapUint64StringR, (*decFnInfo).fastpathDecMapUint64StringR) + fn(map[uint64]uint(nil), (*encFnInfo).fastpathEncMapUint64UintR, (*decFnInfo).fastpathDecMapUint64UintR) + fn(map[uint64]uint8(nil), (*encFnInfo).fastpathEncMapUint64Uint8R, (*decFnInfo).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint16(nil), (*encFnInfo).fastpathEncMapUint64Uint16R, (*decFnInfo).fastpathDecMapUint64Uint16R) + fn(map[uint64]uint32(nil), (*encFnInfo).fastpathEncMapUint64Uint32R, (*decFnInfo).fastpathDecMapUint64Uint32R) + fn(map[uint64]uint64(nil), (*encFnInfo).fastpathEncMapUint64Uint64R, 
(*decFnInfo).fastpathDecMapUint64Uint64R) + fn(map[uint64]uintptr(nil), (*encFnInfo).fastpathEncMapUint64UintptrR, (*decFnInfo).fastpathDecMapUint64UintptrR) + fn(map[uint64]int(nil), (*encFnInfo).fastpathEncMapUint64IntR, (*decFnInfo).fastpathDecMapUint64IntR) + fn(map[uint64]int8(nil), (*encFnInfo).fastpathEncMapUint64Int8R, (*decFnInfo).fastpathDecMapUint64Int8R) + fn(map[uint64]int16(nil), (*encFnInfo).fastpathEncMapUint64Int16R, (*decFnInfo).fastpathDecMapUint64Int16R) + fn(map[uint64]int32(nil), (*encFnInfo).fastpathEncMapUint64Int32R, (*decFnInfo).fastpathDecMapUint64Int32R) + fn(map[uint64]int64(nil), (*encFnInfo).fastpathEncMapUint64Int64R, (*decFnInfo).fastpathDecMapUint64Int64R) + fn(map[uint64]float32(nil), (*encFnInfo).fastpathEncMapUint64Float32R, (*decFnInfo).fastpathDecMapUint64Float32R) + fn(map[uint64]float64(nil), (*encFnInfo).fastpathEncMapUint64Float64R, (*decFnInfo).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*encFnInfo).fastpathEncMapUint64BoolR, (*decFnInfo).fastpathDecMapUint64BoolR) + fn(map[uintptr]interface{}(nil), (*encFnInfo).fastpathEncMapUintptrIntfR, (*decFnInfo).fastpathDecMapUintptrIntfR) + fn(map[uintptr]string(nil), (*encFnInfo).fastpathEncMapUintptrStringR, (*decFnInfo).fastpathDecMapUintptrStringR) + fn(map[uintptr]uint(nil), (*encFnInfo).fastpathEncMapUintptrUintR, (*decFnInfo).fastpathDecMapUintptrUintR) + fn(map[uintptr]uint8(nil), (*encFnInfo).fastpathEncMapUintptrUint8R, (*decFnInfo).fastpathDecMapUintptrUint8R) + fn(map[uintptr]uint16(nil), (*encFnInfo).fastpathEncMapUintptrUint16R, (*decFnInfo).fastpathDecMapUintptrUint16R) + fn(map[uintptr]uint32(nil), (*encFnInfo).fastpathEncMapUintptrUint32R, (*decFnInfo).fastpathDecMapUintptrUint32R) + fn(map[uintptr]uint64(nil), (*encFnInfo).fastpathEncMapUintptrUint64R, (*decFnInfo).fastpathDecMapUintptrUint64R) + fn(map[uintptr]uintptr(nil), (*encFnInfo).fastpathEncMapUintptrUintptrR, (*decFnInfo).fastpathDecMapUintptrUintptrR) + fn(map[uintptr]int(nil), (*encFnInfo).fastpathEncMapUintptrIntR, (*decFnInfo).fastpathDecMapUintptrIntR) + fn(map[uintptr]int8(nil), (*encFnInfo).fastpathEncMapUintptrInt8R, (*decFnInfo).fastpathDecMapUintptrInt8R) + fn(map[uintptr]int16(nil), (*encFnInfo).fastpathEncMapUintptrInt16R, (*decFnInfo).fastpathDecMapUintptrInt16R) + fn(map[uintptr]int32(nil), (*encFnInfo).fastpathEncMapUintptrInt32R, (*decFnInfo).fastpathDecMapUintptrInt32R) + fn(map[uintptr]int64(nil), (*encFnInfo).fastpathEncMapUintptrInt64R, (*decFnInfo).fastpathDecMapUintptrInt64R) + fn(map[uintptr]float32(nil), (*encFnInfo).fastpathEncMapUintptrFloat32R, (*decFnInfo).fastpathDecMapUintptrFloat32R) + fn(map[uintptr]float64(nil), (*encFnInfo).fastpathEncMapUintptrFloat64R, (*decFnInfo).fastpathDecMapUintptrFloat64R) + fn(map[uintptr]bool(nil), (*encFnInfo).fastpathEncMapUintptrBoolR, (*decFnInfo).fastpathDecMapUintptrBoolR) + fn(map[int]interface{}(nil), (*encFnInfo).fastpathEncMapIntIntfR, (*decFnInfo).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*encFnInfo).fastpathEncMapIntStringR, (*decFnInfo).fastpathDecMapIntStringR) + fn(map[int]uint(nil), (*encFnInfo).fastpathEncMapIntUintR, (*decFnInfo).fastpathDecMapIntUintR) + fn(map[int]uint8(nil), (*encFnInfo).fastpathEncMapIntUint8R, (*decFnInfo).fastpathDecMapIntUint8R) + fn(map[int]uint16(nil), (*encFnInfo).fastpathEncMapIntUint16R, (*decFnInfo).fastpathDecMapIntUint16R) + fn(map[int]uint32(nil), (*encFnInfo).fastpathEncMapIntUint32R, (*decFnInfo).fastpathDecMapIntUint32R) + fn(map[int]uint64(nil), (*encFnInfo).fastpathEncMapIntUint64R, 
(*decFnInfo).fastpathDecMapIntUint64R) + fn(map[int]uintptr(nil), (*encFnInfo).fastpathEncMapIntUintptrR, (*decFnInfo).fastpathDecMapIntUintptrR) + fn(map[int]int(nil), (*encFnInfo).fastpathEncMapIntIntR, (*decFnInfo).fastpathDecMapIntIntR) + fn(map[int]int8(nil), (*encFnInfo).fastpathEncMapIntInt8R, (*decFnInfo).fastpathDecMapIntInt8R) + fn(map[int]int16(nil), (*encFnInfo).fastpathEncMapIntInt16R, (*decFnInfo).fastpathDecMapIntInt16R) + fn(map[int]int32(nil), (*encFnInfo).fastpathEncMapIntInt32R, (*decFnInfo).fastpathDecMapIntInt32R) + fn(map[int]int64(nil), (*encFnInfo).fastpathEncMapIntInt64R, (*decFnInfo).fastpathDecMapIntInt64R) + fn(map[int]float32(nil), (*encFnInfo).fastpathEncMapIntFloat32R, (*decFnInfo).fastpathDecMapIntFloat32R) + fn(map[int]float64(nil), (*encFnInfo).fastpathEncMapIntFloat64R, (*decFnInfo).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*encFnInfo).fastpathEncMapIntBoolR, (*decFnInfo).fastpathDecMapIntBoolR) + fn(map[int8]interface{}(nil), (*encFnInfo).fastpathEncMapInt8IntfR, (*decFnInfo).fastpathDecMapInt8IntfR) + fn(map[int8]string(nil), (*encFnInfo).fastpathEncMapInt8StringR, (*decFnInfo).fastpathDecMapInt8StringR) + fn(map[int8]uint(nil), (*encFnInfo).fastpathEncMapInt8UintR, (*decFnInfo).fastpathDecMapInt8UintR) + fn(map[int8]uint8(nil), (*encFnInfo).fastpathEncMapInt8Uint8R, (*decFnInfo).fastpathDecMapInt8Uint8R) + fn(map[int8]uint16(nil), (*encFnInfo).fastpathEncMapInt8Uint16R, (*decFnInfo).fastpathDecMapInt8Uint16R) + fn(map[int8]uint32(nil), (*encFnInfo).fastpathEncMapInt8Uint32R, (*decFnInfo).fastpathDecMapInt8Uint32R) + fn(map[int8]uint64(nil), (*encFnInfo).fastpathEncMapInt8Uint64R, (*decFnInfo).fastpathDecMapInt8Uint64R) + fn(map[int8]uintptr(nil), (*encFnInfo).fastpathEncMapInt8UintptrR, (*decFnInfo).fastpathDecMapInt8UintptrR) + fn(map[int8]int(nil), (*encFnInfo).fastpathEncMapInt8IntR, (*decFnInfo).fastpathDecMapInt8IntR) + fn(map[int8]int8(nil), (*encFnInfo).fastpathEncMapInt8Int8R, (*decFnInfo).fastpathDecMapInt8Int8R) + fn(map[int8]int16(nil), (*encFnInfo).fastpathEncMapInt8Int16R, (*decFnInfo).fastpathDecMapInt8Int16R) + fn(map[int8]int32(nil), (*encFnInfo).fastpathEncMapInt8Int32R, (*decFnInfo).fastpathDecMapInt8Int32R) + fn(map[int8]int64(nil), (*encFnInfo).fastpathEncMapInt8Int64R, (*decFnInfo).fastpathDecMapInt8Int64R) + fn(map[int8]float32(nil), (*encFnInfo).fastpathEncMapInt8Float32R, (*decFnInfo).fastpathDecMapInt8Float32R) + fn(map[int8]float64(nil), (*encFnInfo).fastpathEncMapInt8Float64R, (*decFnInfo).fastpathDecMapInt8Float64R) + fn(map[int8]bool(nil), (*encFnInfo).fastpathEncMapInt8BoolR, (*decFnInfo).fastpathDecMapInt8BoolR) + fn(map[int16]interface{}(nil), (*encFnInfo).fastpathEncMapInt16IntfR, (*decFnInfo).fastpathDecMapInt16IntfR) + fn(map[int16]string(nil), (*encFnInfo).fastpathEncMapInt16StringR, (*decFnInfo).fastpathDecMapInt16StringR) + fn(map[int16]uint(nil), (*encFnInfo).fastpathEncMapInt16UintR, (*decFnInfo).fastpathDecMapInt16UintR) + fn(map[int16]uint8(nil), (*encFnInfo).fastpathEncMapInt16Uint8R, (*decFnInfo).fastpathDecMapInt16Uint8R) + fn(map[int16]uint16(nil), (*encFnInfo).fastpathEncMapInt16Uint16R, (*decFnInfo).fastpathDecMapInt16Uint16R) + fn(map[int16]uint32(nil), (*encFnInfo).fastpathEncMapInt16Uint32R, (*decFnInfo).fastpathDecMapInt16Uint32R) + fn(map[int16]uint64(nil), (*encFnInfo).fastpathEncMapInt16Uint64R, (*decFnInfo).fastpathDecMapInt16Uint64R) + fn(map[int16]uintptr(nil), (*encFnInfo).fastpathEncMapInt16UintptrR, (*decFnInfo).fastpathDecMapInt16UintptrR) + fn(map[int16]int(nil), 
(*encFnInfo).fastpathEncMapInt16IntR, (*decFnInfo).fastpathDecMapInt16IntR) + fn(map[int16]int8(nil), (*encFnInfo).fastpathEncMapInt16Int8R, (*decFnInfo).fastpathDecMapInt16Int8R) + fn(map[int16]int16(nil), (*encFnInfo).fastpathEncMapInt16Int16R, (*decFnInfo).fastpathDecMapInt16Int16R) + fn(map[int16]int32(nil), (*encFnInfo).fastpathEncMapInt16Int32R, (*decFnInfo).fastpathDecMapInt16Int32R) + fn(map[int16]int64(nil), (*encFnInfo).fastpathEncMapInt16Int64R, (*decFnInfo).fastpathDecMapInt16Int64R) + fn(map[int16]float32(nil), (*encFnInfo).fastpathEncMapInt16Float32R, (*decFnInfo).fastpathDecMapInt16Float32R) + fn(map[int16]float64(nil), (*encFnInfo).fastpathEncMapInt16Float64R, (*decFnInfo).fastpathDecMapInt16Float64R) + fn(map[int16]bool(nil), (*encFnInfo).fastpathEncMapInt16BoolR, (*decFnInfo).fastpathDecMapInt16BoolR) + fn(map[int32]interface{}(nil), (*encFnInfo).fastpathEncMapInt32IntfR, (*decFnInfo).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*encFnInfo).fastpathEncMapInt32StringR, (*decFnInfo).fastpathDecMapInt32StringR) + fn(map[int32]uint(nil), (*encFnInfo).fastpathEncMapInt32UintR, (*decFnInfo).fastpathDecMapInt32UintR) + fn(map[int32]uint8(nil), (*encFnInfo).fastpathEncMapInt32Uint8R, (*decFnInfo).fastpathDecMapInt32Uint8R) + fn(map[int32]uint16(nil), (*encFnInfo).fastpathEncMapInt32Uint16R, (*decFnInfo).fastpathDecMapInt32Uint16R) + fn(map[int32]uint32(nil), (*encFnInfo).fastpathEncMapInt32Uint32R, (*decFnInfo).fastpathDecMapInt32Uint32R) + fn(map[int32]uint64(nil), (*encFnInfo).fastpathEncMapInt32Uint64R, (*decFnInfo).fastpathDecMapInt32Uint64R) + fn(map[int32]uintptr(nil), (*encFnInfo).fastpathEncMapInt32UintptrR, (*decFnInfo).fastpathDecMapInt32UintptrR) + fn(map[int32]int(nil), (*encFnInfo).fastpathEncMapInt32IntR, (*decFnInfo).fastpathDecMapInt32IntR) + fn(map[int32]int8(nil), (*encFnInfo).fastpathEncMapInt32Int8R, (*decFnInfo).fastpathDecMapInt32Int8R) + fn(map[int32]int16(nil), (*encFnInfo).fastpathEncMapInt32Int16R, (*decFnInfo).fastpathDecMapInt32Int16R) + fn(map[int32]int32(nil), (*encFnInfo).fastpathEncMapInt32Int32R, (*decFnInfo).fastpathDecMapInt32Int32R) + fn(map[int32]int64(nil), (*encFnInfo).fastpathEncMapInt32Int64R, (*decFnInfo).fastpathDecMapInt32Int64R) + fn(map[int32]float32(nil), (*encFnInfo).fastpathEncMapInt32Float32R, (*decFnInfo).fastpathDecMapInt32Float32R) + fn(map[int32]float64(nil), (*encFnInfo).fastpathEncMapInt32Float64R, (*decFnInfo).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*encFnInfo).fastpathEncMapInt32BoolR, (*decFnInfo).fastpathDecMapInt32BoolR) + fn(map[int64]interface{}(nil), (*encFnInfo).fastpathEncMapInt64IntfR, (*decFnInfo).fastpathDecMapInt64IntfR) + fn(map[int64]string(nil), (*encFnInfo).fastpathEncMapInt64StringR, (*decFnInfo).fastpathDecMapInt64StringR) + fn(map[int64]uint(nil), (*encFnInfo).fastpathEncMapInt64UintR, (*decFnInfo).fastpathDecMapInt64UintR) + fn(map[int64]uint8(nil), (*encFnInfo).fastpathEncMapInt64Uint8R, (*decFnInfo).fastpathDecMapInt64Uint8R) + fn(map[int64]uint16(nil), (*encFnInfo).fastpathEncMapInt64Uint16R, (*decFnInfo).fastpathDecMapInt64Uint16R) + fn(map[int64]uint32(nil), (*encFnInfo).fastpathEncMapInt64Uint32R, (*decFnInfo).fastpathDecMapInt64Uint32R) + fn(map[int64]uint64(nil), (*encFnInfo).fastpathEncMapInt64Uint64R, (*decFnInfo).fastpathDecMapInt64Uint64R) + fn(map[int64]uintptr(nil), (*encFnInfo).fastpathEncMapInt64UintptrR, (*decFnInfo).fastpathDecMapInt64UintptrR) + fn(map[int64]int(nil), (*encFnInfo).fastpathEncMapInt64IntR, (*decFnInfo).fastpathDecMapInt64IntR) + 
fn(map[int64]int8(nil), (*encFnInfo).fastpathEncMapInt64Int8R, (*decFnInfo).fastpathDecMapInt64Int8R) + fn(map[int64]int16(nil), (*encFnInfo).fastpathEncMapInt64Int16R, (*decFnInfo).fastpathDecMapInt64Int16R) + fn(map[int64]int32(nil), (*encFnInfo).fastpathEncMapInt64Int32R, (*decFnInfo).fastpathDecMapInt64Int32R) + fn(map[int64]int64(nil), (*encFnInfo).fastpathEncMapInt64Int64R, (*decFnInfo).fastpathDecMapInt64Int64R) + fn(map[int64]float32(nil), (*encFnInfo).fastpathEncMapInt64Float32R, (*decFnInfo).fastpathDecMapInt64Float32R) + fn(map[int64]float64(nil), (*encFnInfo).fastpathEncMapInt64Float64R, (*decFnInfo).fastpathDecMapInt64Float64R) + fn(map[int64]bool(nil), (*encFnInfo).fastpathEncMapInt64BoolR, (*decFnInfo).fastpathDecMapInt64BoolR) + fn(map[bool]interface{}(nil), (*encFnInfo).fastpathEncMapBoolIntfR, (*decFnInfo).fastpathDecMapBoolIntfR) + fn(map[bool]string(nil), (*encFnInfo).fastpathEncMapBoolStringR, (*decFnInfo).fastpathDecMapBoolStringR) + fn(map[bool]uint(nil), (*encFnInfo).fastpathEncMapBoolUintR, (*decFnInfo).fastpathDecMapBoolUintR) + fn(map[bool]uint8(nil), (*encFnInfo).fastpathEncMapBoolUint8R, (*decFnInfo).fastpathDecMapBoolUint8R) + fn(map[bool]uint16(nil), (*encFnInfo).fastpathEncMapBoolUint16R, (*decFnInfo).fastpathDecMapBoolUint16R) + fn(map[bool]uint32(nil), (*encFnInfo).fastpathEncMapBoolUint32R, (*decFnInfo).fastpathDecMapBoolUint32R) + fn(map[bool]uint64(nil), (*encFnInfo).fastpathEncMapBoolUint64R, (*decFnInfo).fastpathDecMapBoolUint64R) + fn(map[bool]uintptr(nil), (*encFnInfo).fastpathEncMapBoolUintptrR, (*decFnInfo).fastpathDecMapBoolUintptrR) + fn(map[bool]int(nil), (*encFnInfo).fastpathEncMapBoolIntR, (*decFnInfo).fastpathDecMapBoolIntR) + fn(map[bool]int8(nil), (*encFnInfo).fastpathEncMapBoolInt8R, (*decFnInfo).fastpathDecMapBoolInt8R) + fn(map[bool]int16(nil), (*encFnInfo).fastpathEncMapBoolInt16R, (*decFnInfo).fastpathDecMapBoolInt16R) + fn(map[bool]int32(nil), (*encFnInfo).fastpathEncMapBoolInt32R, (*decFnInfo).fastpathDecMapBoolInt32R) + fn(map[bool]int64(nil), (*encFnInfo).fastpathEncMapBoolInt64R, (*decFnInfo).fastpathDecMapBoolInt64R) + fn(map[bool]float32(nil), (*encFnInfo).fastpathEncMapBoolFloat32R, (*decFnInfo).fastpathDecMapBoolFloat32R) + fn(map[bool]float64(nil), (*encFnInfo).fastpathEncMapBoolFloat64R, (*decFnInfo).fastpathDecMapBoolFloat64R) + fn(map[bool]bool(nil), (*encFnInfo).fastpathEncMapBoolBoolR, (*decFnInfo).fastpathDecMapBoolBoolR) + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + + case []interface{}: + fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e) + case *[]interface{}: + fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e) + case *map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]string: + fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e) + case *map[interface{}]string: + fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint: + fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint: + fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint16: + 
fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e) + case *map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int: + fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e) + case *map[interface{}]int: + fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e) + case *map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e) + + case []string: + fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e) + case *[]string: + fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e) + + case map[string]interface{}: + fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e) + case *map[string]interface{}: + fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e) + + case map[string]string: + fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e) + case *map[string]string: + fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e) + + case map[string]uint: + fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e) + case *map[string]uint: + fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e) + + case map[string]uint8: + fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e) + case *map[string]uint8: + fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e) + + case map[string]uint16: + fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e) + case *map[string]uint16: + fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e) + + case map[string]uint32: + fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e) + case *map[string]uint32: + fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e) + + case map[string]uint64: + fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e) + case *map[string]uint64: + fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e) + + case 
map[string]uintptr: + fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e) + case *map[string]uintptr: + fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e) + + case map[string]int: + fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e) + case *map[string]int: + fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e) + + case map[string]int8: + fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e) + case *map[string]int8: + fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e) + + case map[string]int16: + fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e) + case *map[string]int16: + fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e) + + case map[string]int32: + fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e) + case *map[string]int32: + fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e) + + case map[string]int64: + fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e) + case *map[string]int64: + fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e) + + case map[string]float32: + fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e) + case *map[string]float32: + fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e) + + case map[string]float64: + fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e) + case *map[string]float64: + fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e) + + case map[string]bool: + fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e) + case *map[string]bool: + fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e) + + case []float32: + fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e) + case *[]float32: + fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e) + + case map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e) + case *map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e) + + case map[float32]string: + fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e) + case *map[float32]string: + fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e) + + case map[float32]uint: + fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e) + case *map[float32]uint: + fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e) + + case map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e) + case *map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e) + case *map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e) + case *map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e) + case *map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e) + case *map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[float32]int: + fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e) + case *map[float32]int: + fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e) + + case map[float32]int8: + fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e) + case *map[float32]int8: + fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e) + + case map[float32]int16: + 
fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e) + case *map[float32]int16: + fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e) + + case map[float32]int32: + fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e) + case *map[float32]int32: + fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e) + + case map[float32]int64: + fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e) + case *map[float32]int64: + fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e) + + case map[float32]float32: + fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e) + case *map[float32]float32: + fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e) + + case map[float32]float64: + fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e) + case *map[float32]float64: + fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e) + + case map[float32]bool: + fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e) + case *map[float32]bool: + fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e) + + case []float64: + fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e) + case *[]float64: + fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e) + + case map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e) + case *map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e) + + case map[float64]string: + fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e) + case *map[float64]string: + fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e) + + case map[float64]uint: + fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e) + case *map[float64]uint: + fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e) + + case map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e) + case *map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e) + case *map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e) + case *map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e) + case *map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e) + case *map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[float64]int: + fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e) + case *map[float64]int: + fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e) + + case map[float64]int8: + fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e) + case *map[float64]int8: + fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e) + + case map[float64]int16: + fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e) + case *map[float64]int16: + fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e) + + case map[float64]int32: + fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e) + case *map[float64]int32: + fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e) + + case map[float64]int64: + fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e) + case *map[float64]int64: + fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e) + + case 
map[float64]float32: + fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e) + case *map[float64]float32: + fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e) + + case map[float64]float64: + fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e) + case *map[float64]float64: + fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e) + + case map[float64]bool: + fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e) + case *map[float64]bool: + fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e) + + case []uint: + fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e) + case *[]uint: + fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e) + + case map[uint]interface{}: + fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e) + case *map[uint]interface{}: + fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e) + + case map[uint]string: + fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e) + case *map[uint]string: + fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e) + + case map[uint]uint: + fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e) + case *map[uint]uint: + fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e) + + case map[uint]uint8: + fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e) + case *map[uint]uint8: + fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint16: + fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e) + case *map[uint]uint16: + fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint32: + fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e) + case *map[uint]uint32: + fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint64: + fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e) + case *map[uint]uint64: + fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e) + + case map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e) + case *map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint]int: + fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e) + case *map[uint]int: + fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e) + + case map[uint]int8: + fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e) + case *map[uint]int8: + fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e) + + case map[uint]int16: + fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e) + case *map[uint]int16: + fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e) + + case map[uint]int32: + fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e) + case *map[uint]int32: + fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e) + + case map[uint]int64: + fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e) + case *map[uint]int64: + fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e) + + case map[uint]float32: + fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e) + case *map[uint]float32: + fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e) + + case map[uint]float64: + fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e) + case *map[uint]float64: + fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e) + + case map[uint]bool: + fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e) + case *map[uint]bool: + fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e) + + case map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e) + case *map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e) + + 
case map[uint8]string: + fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e) + case *map[uint8]string: + fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint: + fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e) + case *map[uint8]uint: + fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint8]int: + fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e) + case *map[uint8]int: + fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e) + + case map[uint8]int8: + fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e) + case *map[uint8]int8: + fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int16: + fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e) + case *map[uint8]int16: + fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int32: + fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e) + case *map[uint8]int32: + fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int64: + fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e) + case *map[uint8]int64: + fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]float32: + fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e) + case *map[uint8]float32: + fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint8]float64: + fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e) + case *map[uint8]float64: + fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]bool: + fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e) + case *map[uint8]bool: + fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e) + + case []uint16: + fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e) + case *[]uint16: + fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e) + + case map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e) + case *map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint16]string: + fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e) + case *map[uint16]string: + fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint: + fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e) + case *map[uint16]uint: + fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint16: + 
fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint16]int: + fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e) + case *map[uint16]int: + fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e) + + case map[uint16]int8: + fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e) + case *map[uint16]int8: + fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int16: + fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e) + case *map[uint16]int16: + fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int32: + fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e) + case *map[uint16]int32: + fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int64: + fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e) + case *map[uint16]int64: + fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]float32: + fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e) + case *map[uint16]float32: + fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]float64: + fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e) + case *map[uint16]float64: + fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]bool: + fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e) + case *map[uint16]bool: + fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e) + + case []uint32: + fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e) + case *[]uint32: + fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e) + case *map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint32]string: + fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e) + case *map[uint32]string: + fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint: + fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e) + case *map[uint32]uint: + fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(*v, 
fastpathCheckNilTrue, e) + + case map[uint32]int: + fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e) + case *map[uint32]int: + fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e) + + case map[uint32]int8: + fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e) + case *map[uint32]int8: + fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int16: + fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e) + case *map[uint32]int16: + fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int32: + fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e) + case *map[uint32]int32: + fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int64: + fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e) + case *map[uint32]int64: + fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]float32: + fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e) + case *map[uint32]float32: + fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]float64: + fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e) + case *map[uint32]float64: + fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]bool: + fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e) + case *map[uint32]bool: + fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e) + + case []uint64: + fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e) + case *[]uint64: + fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e) + case *map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint64]string: + fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e) + case *map[uint64]string: + fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint: + fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e) + case *map[uint64]uint: + fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint64]int: + fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e) + case *map[uint64]int: + fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e) + + case map[uint64]int8: + fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e) + case *map[uint64]int8: + fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int16: + fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e) + case *map[uint64]int16: + fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int32: + 
fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e) + case *map[uint64]int32: + fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int64: + fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e) + case *map[uint64]int64: + fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]float32: + fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e) + case *map[uint64]float32: + fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]float64: + fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e) + case *map[uint64]float64: + fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]bool: + fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e) + case *map[uint64]bool: + fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e) + + case []uintptr: + fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e) + case *[]uintptr: + fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e) + case *map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]string: + fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e) + case *map[uintptr]string: + fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e) + case *map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int: + fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e) + case *map[uintptr]int: + fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e) + + case 
map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e) + case *map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e) + + case []int: + fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e) + case *[]int: + fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e) + + case map[int]interface{}: + fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e) + case *map[int]interface{}: + fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e) + + case map[int]string: + fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e) + case *map[int]string: + fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e) + + case map[int]uint: + fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e) + case *map[int]uint: + fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e) + + case map[int]uint8: + fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e) + case *map[int]uint8: + fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e) + + case map[int]uint16: + fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e) + case *map[int]uint16: + fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e) + + case map[int]uint32: + fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e) + case *map[int]uint32: + fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e) + + case map[int]uint64: + fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e) + case *map[int]uint64: + fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e) + + case map[int]uintptr: + fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e) + case *map[int]uintptr: + fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e) + + case map[int]int: + fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e) + case *map[int]int: + fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e) + + case map[int]int8: + fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e) + case *map[int]int8: + fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e) + + case map[int]int16: + fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e) + case *map[int]int16: + fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e) + + case map[int]int32: + fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e) + case *map[int]int32: + fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e) + + case map[int]int64: + fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e) + case *map[int]int64: + fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e) + + case map[int]float32: + fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e) + case *map[int]float32: + fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e) + + case map[int]float64: + fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e) + case *map[int]float64: + fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e) + + case map[int]bool: + fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e) + case *map[int]bool: + fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e) + + case []int8: + fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e) + case *[]int8: + fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e) + + case map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e) + case *map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e) + + case map[int8]string: + fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e) + case *map[int8]string: + 
fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e) + + case map[int8]uint: + fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e) + case *map[int8]uint: + fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e) + + case map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e) + case *map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e) + case *map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e) + case *map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e) + case *map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e) + case *map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int8]int: + fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e) + case *map[int8]int: + fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e) + + case map[int8]int8: + fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e) + case *map[int8]int8: + fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e) + + case map[int8]int16: + fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e) + case *map[int8]int16: + fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e) + + case map[int8]int32: + fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e) + case *map[int8]int32: + fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e) + + case map[int8]int64: + fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e) + case *map[int8]int64: + fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e) + + case map[int8]float32: + fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e) + case *map[int8]float32: + fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e) + + case map[int8]float64: + fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e) + case *map[int8]float64: + fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e) + + case map[int8]bool: + fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e) + case *map[int8]bool: + fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e) + + case []int16: + fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e) + case *[]int16: + fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e) + + case map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e) + case *map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e) + + case map[int16]string: + fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e) + case *map[int16]string: + fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e) + + case map[int16]uint: + fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e) + case *map[int16]uint: + fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e) + + case map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e) + case *map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e) + case *map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e) + case *map[int16]uint32: + 
fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e) + case *map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e) + case *map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int16]int: + fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e) + case *map[int16]int: + fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e) + + case map[int16]int8: + fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e) + case *map[int16]int8: + fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e) + + case map[int16]int16: + fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e) + case *map[int16]int16: + fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e) + + case map[int16]int32: + fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e) + case *map[int16]int32: + fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e) + + case map[int16]int64: + fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e) + case *map[int16]int64: + fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e) + + case map[int16]float32: + fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e) + case *map[int16]float32: + fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e) + + case map[int16]float64: + fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e) + case *map[int16]float64: + fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e) + + case map[int16]bool: + fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e) + case *map[int16]bool: + fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e) + + case []int32: + fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e) + case *[]int32: + fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e) + + case map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e) + case *map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e) + + case map[int32]string: + fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e) + case *map[int32]string: + fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e) + + case map[int32]uint: + fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e) + case *map[int32]uint: + fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e) + + case map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e) + case *map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e) + case *map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e) + case *map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e) + case *map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e) + case *map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int32]int: + fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e) + case *map[int32]int: + fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e) + + case map[int32]int8: + fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e) 
+ case *map[int32]int8: + fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e) + + case map[int32]int16: + fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e) + case *map[int32]int16: + fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e) + + case map[int32]int32: + fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e) + case *map[int32]int32: + fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e) + + case map[int32]int64: + fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e) + case *map[int32]int64: + fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e) + + case map[int32]float32: + fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e) + case *map[int32]float32: + fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e) + + case map[int32]float64: + fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e) + case *map[int32]float64: + fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e) + + case map[int32]bool: + fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e) + case *map[int32]bool: + fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e) + + case []int64: + fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e) + case *[]int64: + fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e) + + case map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e) + case *map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e) + + case map[int64]string: + fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e) + case *map[int64]string: + fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e) + + case map[int64]uint: + fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e) + case *map[int64]uint: + fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e) + + case map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e) + case *map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e) + case *map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e) + case *map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e) + case *map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e) + case *map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int64]int: + fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e) + case *map[int64]int: + fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e) + + case map[int64]int8: + fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e) + case *map[int64]int8: + fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e) + + case map[int64]int16: + fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e) + case *map[int64]int16: + fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e) + + case map[int64]int32: + fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e) + case *map[int64]int32: + fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e) + + case map[int64]int64: + fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e) + case *map[int64]int64: + fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e) + + case map[int64]float32: + fastpathTV.EncMapInt64Float32V(v, 
fastpathCheckNilTrue, e) + case *map[int64]float32: + fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e) + + case map[int64]float64: + fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e) + case *map[int64]float64: + fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e) + + case map[int64]bool: + fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e) + case *map[int64]bool: + fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e) + + case []bool: + fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e) + case *[]bool: + fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e) + + case map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e) + case *map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e) + + case map[bool]string: + fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e) + case *map[bool]string: + fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e) + + case map[bool]uint: + fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e) + case *map[bool]uint: + fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e) + + case map[bool]uint8: + fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e) + case *map[bool]uint8: + fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint16: + fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e) + case *map[bool]uint16: + fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint32: + fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e) + case *map[bool]uint32: + fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint64: + fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e) + case *map[bool]uint64: + fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e) + + case map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e) + case *map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e) + + case map[bool]int: + fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e) + case *map[bool]int: + fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e) + + case map[bool]int8: + fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e) + case *map[bool]int8: + fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e) + + case map[bool]int16: + fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e) + case *map[bool]int16: + fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e) + + case map[bool]int32: + fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e) + case *map[bool]int32: + fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e) + + case map[bool]int64: + fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e) + case *map[bool]int64: + fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e) + + case map[bool]float32: + fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e) + case *map[bool]float32: + fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e) + + case map[bool]float64: + fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e) + case *map[bool]float64: + fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e) + + case map[bool]bool: + fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e) + case *map[bool]bool: + fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e) + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + + case 
[]interface{}: + fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e) + case *[]interface{}: + fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e) + + case []string: + fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e) + case *[]string: + fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e) + + case []float32: + fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e) + case *[]float32: + fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e) + + case []float64: + fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e) + case *[]float64: + fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e) + + case []uint: + fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e) + case *[]uint: + fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e) + + case []uint16: + fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e) + case *[]uint16: + fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e) + + case []uint32: + fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e) + case *[]uint32: + fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e) + + case []uint64: + fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e) + case *[]uint64: + fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e) + + case []uintptr: + fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e) + case *[]uintptr: + fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e) + + case []int: + fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e) + case *[]int: + fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e) + + case []int8: + fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e) + case *[]int8: + fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e) + + case []int16: + fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e) + case *[]int16: + fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e) + + case []int32: + fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e) + case *[]int32: + fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e) + + case []int64: + fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e) + case *[]int64: + fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e) + + case []bool: + fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e) + case *[]bool: + fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e) + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + + case map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e) + case *map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]string: + fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e) + case *map[interface{}]string: + fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint: + fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint: + fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint32: + 
fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e) + case *map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int: + fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e) + case *map[interface{}]int: + fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e) + case *map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e) + + case map[string]interface{}: + fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e) + case *map[string]interface{}: + fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e) + + case map[string]string: + fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e) + case *map[string]string: + fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e) + + case map[string]uint: + fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e) + case *map[string]uint: + fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e) + + case map[string]uint8: + fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e) + case *map[string]uint8: + fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e) + + case map[string]uint16: + fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e) + case *map[string]uint16: + fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e) + + case map[string]uint32: + fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e) + case *map[string]uint32: + fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e) + + case map[string]uint64: + fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e) + case *map[string]uint64: + fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e) + + case map[string]uintptr: + fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e) + case *map[string]uintptr: + fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e) + + case map[string]int: + fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e) + case *map[string]int: + fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e) + + case map[string]int8: + fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e) + case 
*map[string]int8: + fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e) + + case map[string]int16: + fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e) + case *map[string]int16: + fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e) + + case map[string]int32: + fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e) + case *map[string]int32: + fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e) + + case map[string]int64: + fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e) + case *map[string]int64: + fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e) + + case map[string]float32: + fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e) + case *map[string]float32: + fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e) + + case map[string]float64: + fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e) + case *map[string]float64: + fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e) + + case map[string]bool: + fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e) + case *map[string]bool: + fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e) + + case map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e) + case *map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e) + + case map[float32]string: + fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e) + case *map[float32]string: + fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e) + + case map[float32]uint: + fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e) + case *map[float32]uint: + fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e) + + case map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e) + case *map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e) + case *map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e) + case *map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e) + case *map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e) + case *map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[float32]int: + fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e) + case *map[float32]int: + fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e) + + case map[float32]int8: + fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e) + case *map[float32]int8: + fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e) + + case map[float32]int16: + fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e) + case *map[float32]int16: + fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e) + + case map[float32]int32: + fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e) + case *map[float32]int32: + fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e) + + case map[float32]int64: + fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e) + case *map[float32]int64: + fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e) + + case map[float32]float32: + fastpathTV.EncMapFloat32Float32V(v, 
fastpathCheckNilTrue, e) + case *map[float32]float32: + fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e) + + case map[float32]float64: + fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e) + case *map[float32]float64: + fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e) + + case map[float32]bool: + fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e) + case *map[float32]bool: + fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e) + + case map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e) + case *map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e) + + case map[float64]string: + fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e) + case *map[float64]string: + fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e) + + case map[float64]uint: + fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e) + case *map[float64]uint: + fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e) + + case map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e) + case *map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e) + case *map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e) + case *map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e) + case *map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e) + case *map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[float64]int: + fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e) + case *map[float64]int: + fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e) + + case map[float64]int8: + fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e) + case *map[float64]int8: + fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e) + + case map[float64]int16: + fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e) + case *map[float64]int16: + fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e) + + case map[float64]int32: + fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e) + case *map[float64]int32: + fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e) + + case map[float64]int64: + fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e) + case *map[float64]int64: + fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e) + + case map[float64]float32: + fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e) + case *map[float64]float32: + fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e) + + case map[float64]float64: + fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e) + case *map[float64]float64: + fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e) + + case map[float64]bool: + fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e) + case *map[float64]bool: + fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e) + + case map[uint]interface{}: + fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e) + case *map[uint]interface{}: + fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e) + + case 
map[uint]string: + fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e) + case *map[uint]string: + fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e) + + case map[uint]uint: + fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e) + case *map[uint]uint: + fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e) + + case map[uint]uint8: + fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e) + case *map[uint]uint8: + fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint16: + fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e) + case *map[uint]uint16: + fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint32: + fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e) + case *map[uint]uint32: + fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint64: + fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e) + case *map[uint]uint64: + fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e) + + case map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e) + case *map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint]int: + fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e) + case *map[uint]int: + fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e) + + case map[uint]int8: + fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e) + case *map[uint]int8: + fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e) + + case map[uint]int16: + fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e) + case *map[uint]int16: + fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e) + + case map[uint]int32: + fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e) + case *map[uint]int32: + fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e) + + case map[uint]int64: + fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e) + case *map[uint]int64: + fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e) + + case map[uint]float32: + fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e) + case *map[uint]float32: + fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e) + + case map[uint]float64: + fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e) + case *map[uint]float64: + fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e) + + case map[uint]bool: + fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e) + case *map[uint]bool: + fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e) + + case map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e) + case *map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint8]string: + fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e) + case *map[uint8]string: + fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint: + fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e) + case *map[uint8]uint: + fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e) + + case 
map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint8]int: + fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e) + case *map[uint8]int: + fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e) + + case map[uint8]int8: + fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e) + case *map[uint8]int8: + fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int16: + fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e) + case *map[uint8]int16: + fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int32: + fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e) + case *map[uint8]int32: + fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int64: + fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e) + case *map[uint8]int64: + fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]float32: + fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e) + case *map[uint8]float32: + fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint8]float64: + fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e) + case *map[uint8]float64: + fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]bool: + fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e) + case *map[uint8]bool: + fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e) + + case map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e) + case *map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint16]string: + fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e) + case *map[uint16]string: + fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint: + fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e) + case *map[uint16]uint: + fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint16]int: + fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e) + case *map[uint16]int: + fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e) + + case map[uint16]int8: + fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e) + case *map[uint16]int8: + fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int16: + fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e) + case 
*map[uint16]int16: + fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int32: + fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e) + case *map[uint16]int32: + fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int64: + fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e) + case *map[uint16]int64: + fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]float32: + fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e) + case *map[uint16]float32: + fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]float64: + fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e) + case *map[uint16]float64: + fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]bool: + fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e) + case *map[uint16]bool: + fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e) + + case map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e) + case *map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint32]string: + fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e) + case *map[uint32]string: + fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint: + fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e) + case *map[uint32]uint: + fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint32]int: + fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e) + case *map[uint32]int: + fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e) + + case map[uint32]int8: + fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e) + case *map[uint32]int8: + fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int16: + fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e) + case *map[uint32]int16: + fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int32: + fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e) + case *map[uint32]int32: + fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int64: + fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e) + case *map[uint32]int64: + fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]float32: + fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e) + case *map[uint32]float32: + fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]float64: + fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e) + case *map[uint32]float64: + 
fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]bool: + fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e) + case *map[uint32]bool: + fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e) + + case map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e) + case *map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint64]string: + fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e) + case *map[uint64]string: + fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint: + fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e) + case *map[uint64]uint: + fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint64]int: + fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e) + case *map[uint64]int: + fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e) + + case map[uint64]int8: + fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e) + case *map[uint64]int8: + fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int16: + fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e) + case *map[uint64]int16: + fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int32: + fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e) + case *map[uint64]int32: + fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int64: + fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e) + case *map[uint64]int64: + fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]float32: + fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e) + case *map[uint64]float32: + fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]float64: + fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e) + case *map[uint64]float64: + fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]bool: + fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e) + case *map[uint64]bool: + fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e) + case *map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]string: + fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e) + case *map[uintptr]string: + fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint: + 
fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e) + case *map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int: + fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e) + case *map[uintptr]int: + fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e) + case *map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e) + + case map[int]interface{}: + fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e) + case *map[int]interface{}: + fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e) + + case map[int]string: + fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e) + case *map[int]string: + fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e) + + case map[int]uint: + fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e) + case *map[int]uint: + fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e) + + case map[int]uint8: + fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e) + case *map[int]uint8: + fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e) + + case map[int]uint16: + fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e) + case *map[int]uint16: + fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e) + + case map[int]uint32: + fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e) + case *map[int]uint32: + fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e) + + case map[int]uint64: + fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e) + case *map[int]uint64: + fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e) + + 
case map[int]uintptr: + fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e) + case *map[int]uintptr: + fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e) + + case map[int]int: + fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e) + case *map[int]int: + fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e) + + case map[int]int8: + fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e) + case *map[int]int8: + fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e) + + case map[int]int16: + fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e) + case *map[int]int16: + fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e) + + case map[int]int32: + fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e) + case *map[int]int32: + fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e) + + case map[int]int64: + fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e) + case *map[int]int64: + fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e) + + case map[int]float32: + fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e) + case *map[int]float32: + fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e) + + case map[int]float64: + fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e) + case *map[int]float64: + fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e) + + case map[int]bool: + fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e) + case *map[int]bool: + fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e) + + case map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e) + case *map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e) + + case map[int8]string: + fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e) + case *map[int8]string: + fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e) + + case map[int8]uint: + fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e) + case *map[int8]uint: + fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e) + + case map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e) + case *map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e) + case *map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e) + case *map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e) + case *map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e) + case *map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int8]int: + fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e) + case *map[int8]int: + fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e) + + case map[int8]int8: + fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e) + case *map[int8]int8: + fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e) + + case map[int8]int16: + fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e) + case *map[int8]int16: + fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e) + + case map[int8]int32: + fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e) + case *map[int8]int32: + fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e) + + case map[int8]int64: + fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e) 
+ case *map[int8]int64: + fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e) + + case map[int8]float32: + fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e) + case *map[int8]float32: + fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e) + + case map[int8]float64: + fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e) + case *map[int8]float64: + fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e) + + case map[int8]bool: + fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e) + case *map[int8]bool: + fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e) + + case map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e) + case *map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e) + + case map[int16]string: + fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e) + case *map[int16]string: + fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e) + + case map[int16]uint: + fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e) + case *map[int16]uint: + fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e) + + case map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e) + case *map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e) + case *map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e) + case *map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e) + case *map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e) + case *map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int16]int: + fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e) + case *map[int16]int: + fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e) + + case map[int16]int8: + fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e) + case *map[int16]int8: + fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e) + + case map[int16]int16: + fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e) + case *map[int16]int16: + fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e) + + case map[int16]int32: + fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e) + case *map[int16]int32: + fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e) + + case map[int16]int64: + fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e) + case *map[int16]int64: + fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e) + + case map[int16]float32: + fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e) + case *map[int16]float32: + fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e) + + case map[int16]float64: + fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e) + case *map[int16]float64: + fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e) + + case map[int16]bool: + fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e) + case *map[int16]bool: + fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e) + + case map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e) + case *map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e) + + case map[int32]string: + 
fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e) + case *map[int32]string: + fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e) + + case map[int32]uint: + fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e) + case *map[int32]uint: + fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e) + + case map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e) + case *map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e) + case *map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e) + case *map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e) + case *map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e) + case *map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int32]int: + fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e) + case *map[int32]int: + fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e) + + case map[int32]int8: + fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e) + case *map[int32]int8: + fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e) + + case map[int32]int16: + fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e) + case *map[int32]int16: + fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e) + + case map[int32]int32: + fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e) + case *map[int32]int32: + fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e) + + case map[int32]int64: + fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e) + case *map[int32]int64: + fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e) + + case map[int32]float32: + fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e) + case *map[int32]float32: + fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e) + + case map[int32]float64: + fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e) + case *map[int32]float64: + fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e) + + case map[int32]bool: + fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e) + case *map[int32]bool: + fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e) + + case map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e) + case *map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e) + + case map[int64]string: + fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e) + case *map[int64]string: + fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e) + + case map[int64]uint: + fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e) + case *map[int64]uint: + fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e) + + case map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e) + case *map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e) + case *map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e) + case *map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(*v, 
fastpathCheckNilTrue, e) + + case map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e) + case *map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e) + case *map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int64]int: + fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e) + case *map[int64]int: + fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e) + + case map[int64]int8: + fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e) + case *map[int64]int8: + fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e) + + case map[int64]int16: + fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e) + case *map[int64]int16: + fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e) + + case map[int64]int32: + fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e) + case *map[int64]int32: + fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e) + + case map[int64]int64: + fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e) + case *map[int64]int64: + fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e) + + case map[int64]float32: + fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e) + case *map[int64]float32: + fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e) + + case map[int64]float64: + fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e) + case *map[int64]float64: + fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e) + + case map[int64]bool: + fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e) + case *map[int64]bool: + fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e) + + case map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e) + case *map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e) + + case map[bool]string: + fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e) + case *map[bool]string: + fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e) + + case map[bool]uint: + fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e) + case *map[bool]uint: + fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e) + + case map[bool]uint8: + fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e) + case *map[bool]uint8: + fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint16: + fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e) + case *map[bool]uint16: + fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint32: + fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e) + case *map[bool]uint32: + fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint64: + fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e) + case *map[bool]uint64: + fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e) + + case map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e) + case *map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e) + + case map[bool]int: + fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e) + case *map[bool]int: + fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e) + + case map[bool]int8: + fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e) + case *map[bool]int8: + fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e) + + case map[bool]int16: + fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e) + case *map[bool]int16: + 
fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e) + + case map[bool]int32: + fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e) + case *map[bool]int32: + fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e) + + case map[bool]int64: + fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e) + case *map[bool]int64: + fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e) + + case map[bool]float32: + fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e) + case *map[bool]float32: + fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e) + + case map[bool]float64: + fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e) + case *map[bool]float64: + fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e) + + case map[bool]bool: + fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e) + case *map[bool]bool: + fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e) + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions + +func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeString(c_UTF8, v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeString(c_UTF8, v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceFloat32R(rv 
reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeFloat32(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeFloat32(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeFloat64(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeFloat64(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + 
cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + 
cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for 
j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeBool(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeBool(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) { + fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) { + fastpathTV.EncMapIntfStringV(rv.Interface().(map[interface{}]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else 
{ + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) { + fastpathTV.EncMapIntfUintV(rv.Interface().(map[interface{}]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) { + fastpathTV.EncMapIntfUint8V(rv.Interface().(map[interface{}]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) { + fastpathTV.EncMapIntfUint16V(rv.Interface().(map[interface{}]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } 
+ e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) { + fastpathTV.EncMapIntfUint32V(rv.Interface().(map[interface{}]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) { + fastpathTV.EncMapIntfUint64V(rv.Interface().(map[interface{}]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) { + fastpathTV.EncMapIntfUintptrV(rv.Interface().(map[interface{}]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != 
nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) { + fastpathTV.EncMapIntfIntV(rv.Interface().(map[interface{}]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) { + fastpathTV.EncMapIntfInt8V(rv.Interface().(map[interface{}]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) { + fastpathTV.EncMapIntfInt16V(rv.Interface().(map[interface{}]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr 
!= nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) { + fastpathTV.EncMapIntfInt32V(rv.Interface().(map[interface{}]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) { + fastpathTV.EncMapIntfInt64V(rv.Interface().(map[interface{}]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) { + fastpathTV.EncMapIntfFloat32V(rv.Interface().(map[interface{}]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) { + fastpathTV.EncMapIntfFloat64V(rv.Interface().(map[interface{}]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) { + fastpathTV.EncMapIntfBoolV(rv.Interface().(map[interface{}]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) { + fastpathTV.EncMapStringIntfV(rv.Interface().(map[string]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + 
ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) { + fastpathTV.EncMapStringStringV(rv.Interface().(map[string]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) { + fastpathTV.EncMapStringUintV(rv.Interface().(map[string]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) { + fastpathTV.EncMapStringUint8V(rv.Interface().(map[string]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + 
ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) { + fastpathTV.EncMapStringUint16V(rv.Interface().(map[string]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint32R(rv reflect.Value) { + fastpathTV.EncMapStringUint32V(rv.Interface().(map[string]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) { + fastpathTV.EncMapStringUint64V(rv.Interface().(map[string]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { 
+ cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) { + fastpathTV.EncMapStringUintptrV(rv.Interface().(map[string]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) { + fastpathTV.EncMapStringIntV(rv.Interface().(map[string]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) { + fastpathTV.EncMapStringInt8V(rv.Interface().(map[string]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + 
if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) { + fastpathTV.EncMapStringInt16V(rv.Interface().(map[string]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) { + fastpathTV.EncMapStringInt32V(rv.Interface().(map[string]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) { + fastpathTV.EncMapStringInt64V(rv.Interface().(map[string]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + 
sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) { + fastpathTV.EncMapStringFloat32V(rv.Interface().(map[string]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) { + fastpathTV.EncMapStringFloat64V(rv.Interface().(map[string]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) { + fastpathTV.EncMapStringBoolV(rv.Interface().(map[string]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := 
range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) { + fastpathTV.EncMapFloat32IntfV(rv.Interface().(map[float32]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) { + fastpathTV.EncMapFloat32StringV(rv.Interface().(map[float32]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) { + fastpathTV.EncMapFloat32UintV(rv.Interface().(map[float32]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint8R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint8V(rv.Interface().(map[float32]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint16R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint16V(rv.Interface().(map[float32]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint32R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint32V(rv.Interface().(map[float32]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint64R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint64V(rv.Interface().(map[float32]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) 
EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32UintptrR(rv reflect.Value) { + fastpathTV.EncMapFloat32UintptrV(rv.Interface().(map[float32]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32IntR(rv reflect.Value) { + fastpathTV.EncMapFloat32IntV(rv.Interface().(map[float32]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int8R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int8V(rv.Interface().(map[float32]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int16R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int16V(rv.Interface().(map[float32]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int32R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int32V(rv.Interface().(map[float32]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int64R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int64V(rv.Interface().(map[float32]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Float32R(rv reflect.Value) { + 
fastpathTV.EncMapFloat32Float32V(rv.Interface().(map[float32]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Float64R(rv reflect.Value) { + fastpathTV.EncMapFloat32Float64V(rv.Interface().(map[float32]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32BoolR(rv reflect.Value) { + fastpathTV.EncMapFloat32BoolV(rv.Interface().(map[float32]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64IntfR(rv reflect.Value) { + fastpathTV.EncMapFloat64IntfV(rv.Interface().(map[float64]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := 
range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64StringR(rv reflect.Value) { + fastpathTV.EncMapFloat64StringV(rv.Interface().(map[float64]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64UintR(rv reflect.Value) { + fastpathTV.EncMapFloat64UintV(rv.Interface().(map[float64]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint8R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint8V(rv.Interface().(map[float64]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint16R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint16V(rv.Interface().(map[float64]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint32R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint32V(rv.Interface().(map[float64]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint64R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint64V(rv.Interface().(map[float64]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64UintptrR(rv reflect.Value) { + fastpathTV.EncMapFloat64UintptrV(rv.Interface().(map[float64]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64IntR(rv reflect.Value) { + fastpathTV.EncMapFloat64IntV(rv.Interface().(map[float64]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int8R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int8V(rv.Interface().(map[float64]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int16R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int16V(rv.Interface().(map[float64]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr 
!= nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int32R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int32V(rv.Interface().(map[float64]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int64R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int64V(rv.Interface().(map[float64]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Float32R(rv reflect.Value) { + fastpathTV.EncMapFloat64Float32V(rv.Interface().(map[float64]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Float64R(rv reflect.Value) { + fastpathTV.EncMapFloat64Float64V(rv.Interface().(map[float64]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if 
checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64BoolR(rv reflect.Value) { + fastpathTV.EncMapFloat64BoolV(rv.Interface().(map[float64]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintIntfR(rv reflect.Value) { + fastpathTV.EncMapUintIntfV(rv.Interface().(map[uint]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintStringR(rv reflect.Value) { + fastpathTV.EncMapUintStringV(rv.Interface().(map[uint]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUintR(rv reflect.Value) { + fastpathTV.EncMapUintUintV(rv.Interface().(map[uint]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint8R(rv reflect.Value) { + fastpathTV.EncMapUintUint8V(rv.Interface().(map[uint]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint16R(rv reflect.Value) { + fastpathTV.EncMapUintUint16V(rv.Interface().(map[uint]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint32R(rv reflect.Value) { + fastpathTV.EncMapUintUint32V(rv.Interface().(map[uint]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encoder) { + 
ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) { + fastpathTV.EncMapUintUint64V(rv.Interface().(map[uint]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) { + fastpathTV.EncMapUintUintptrV(rv.Interface().(map[uint]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) { + fastpathTV.EncMapUintIntV(rv.Interface().(map[uint]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range 
v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) { + fastpathTV.EncMapUintInt8V(rv.Interface().(map[uint]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) { + fastpathTV.EncMapUintInt16V(rv.Interface().(map[uint]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) { + fastpathTV.EncMapUintInt32V(rv.Interface().(map[uint]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) { + fastpathTV.EncMapUintInt64V(rv.Interface().(map[uint]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) { + ee 
:= e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) { + fastpathTV.EncMapUintFloat32V(rv.Interface().(map[uint]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) { + fastpathTV.EncMapUintFloat64V(rv.Interface().(map[uint]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) { + fastpathTV.EncMapUintBoolV(rv.Interface().(map[uint]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint(k2)]) + } + } else { + for k2, v2 := 
range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) { + fastpathTV.EncMapUint8IntfV(rv.Interface().(map[uint8]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) { + fastpathTV.EncMapUint8StringV(rv.Interface().(map[uint8]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) { + fastpathTV.EncMapUint8UintV(rv.Interface().(map[uint8]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint8V(rv.Interface().(map[uint8]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, 
checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint16V(rv.Interface().(map[uint8]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint32V(rv.Interface().(map[uint8]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint64V(rv.Interface().(map[uint8]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint8UintptrV(rv.Interface().(map[uint8]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) { + fastpathTV.EncMapUint8IntV(rv.Interface().(map[uint8]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) { + fastpathTV.EncMapUint8Int8V(rv.Interface().(map[uint8]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) { + 
fastpathTV.EncMapUint8Int16V(rv.Interface().(map[uint8]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) { + fastpathTV.EncMapUint8Int32V(rv.Interface().(map[uint8]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) { + fastpathTV.EncMapUint8Int64V(rv.Interface().(map[uint8]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) { + fastpathTV.EncMapUint8Float32V(rv.Interface().(map[uint8]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != 
nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) { + fastpathTV.EncMapUint8Float64V(rv.Interface().(map[uint8]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) { + fastpathTV.EncMapUint8BoolV(rv.Interface().(map[uint8]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) { + fastpathTV.EncMapUint16IntfV(rv.Interface().(map[uint16]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) 
fastpathEncMapUint16StringR(rv reflect.Value) { + fastpathTV.EncMapUint16StringV(rv.Interface().(map[uint16]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) { + fastpathTV.EncMapUint16UintV(rv.Interface().(map[uint16]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint8V(rv.Interface().(map[uint16]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint16V(rv.Interface().(map[uint16]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] 
= uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint32V(rv.Interface().(map[uint16]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint64V(rv.Interface().(map[uint16]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint16UintptrV(rv.Interface().(map[uint16]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) { + fastpathTV.EncMapUint16IntV(rv.Interface().(map[uint16]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int8R(rv reflect.Value) { + fastpathTV.EncMapUint16Int8V(rv.Interface().(map[uint16]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) { + fastpathTV.EncMapUint16Int16V(rv.Interface().(map[uint16]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) { + fastpathTV.EncMapUint16Int32V(rv.Interface().(map[uint16]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + 
ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) { + fastpathTV.EncMapUint16Int64V(rv.Interface().(map[uint16]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) { + fastpathTV.EncMapUint16Float32V(rv.Interface().(map[uint16]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) { + fastpathTV.EncMapUint16Float64V(rv.Interface().(map[uint16]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != 
nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) { + fastpathTV.EncMapUint16BoolV(rv.Interface().(map[uint16]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) { + fastpathTV.EncMapUint32IntfV(rv.Interface().(map[uint32]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) { + fastpathTV.EncMapUint32StringV(rv.Interface().(map[uint32]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) { + fastpathTV.EncMapUint32UintV(rv.Interface().(map[uint32]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e 
*Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint8V(rv.Interface().(map[uint32]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint16V(rv.Interface().(map[uint32]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint32V(rv.Interface().(map[uint32]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint64V(rv.Interface().(map[uint32]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint32UintptrV(rv.Interface().(map[uint32]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) { + fastpathTV.EncMapUint32IntV(rv.Interface().(map[uint32]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) { + 
fastpathTV.EncMapUint32Int8V(rv.Interface().(map[uint32]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) { + fastpathTV.EncMapUint32Int16V(rv.Interface().(map[uint32]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) { + fastpathTV.EncMapUint32Int32V(rv.Interface().(map[uint32]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) { + fastpathTV.EncMapUint32Int64V(rv.Interface().(map[uint32]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { 
+ if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) { + fastpathTV.EncMapUint32Float32V(rv.Interface().(map[uint32]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) { + fastpathTV.EncMapUint32Float64V(rv.Interface().(map[uint32]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) { + fastpathTV.EncMapUint32BoolV(rv.Interface().(map[uint32]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) { + fastpathTV.EncMapUint64IntfV(rv.Interface().(map[uint64]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) { + fastpathTV.EncMapUint64StringV(rv.Interface().(map[uint64]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) { + fastpathTV.EncMapUint64UintV(rv.Interface().(map[uint64]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint8V(rv.Interface().(map[uint64]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, 
len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint16V(rv.Interface().(map[uint64]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint32V(rv.Interface().(map[uint64]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint64V(rv.Interface().(map[uint64]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint64UintptrV(rv.Interface().(map[uint64]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) { + fastpathTV.EncMapUint64IntV(rv.Interface().(map[uint64]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) { + fastpathTV.EncMapUint64Int8V(rv.Interface().(map[uint64]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) { + fastpathTV.EncMapUint64Int16V(rv.Interface().(map[uint64]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && 
v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) { + fastpathTV.EncMapUint64Int32V(rv.Interface().(map[uint64]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) { + fastpathTV.EncMapUint64Int64V(rv.Interface().(map[uint64]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) { + fastpathTV.EncMapUint64Float32V(rv.Interface().(map[uint64]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint64(k2)]) + } + } else { 
+ for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) { + fastpathTV.EncMapUint64Float64V(rv.Interface().(map[uint64]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) { + fastpathTV.EncMapUint64BoolV(rv.Interface().(map[uint64]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) { + fastpathTV.EncMapUintptrIntfV(rv.Interface().(map[uintptr]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) { + fastpathTV.EncMapUintptrStringV(rv.Interface().(map[uintptr]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrStringV(v 
map[uintptr]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) { + fastpathTV.EncMapUintptrUintV(rv.Interface().(map[uintptr]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint8V(rv.Interface().(map[uintptr]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint16V(rv.Interface().(map[uintptr]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint32V(rv.Interface().(map[uintptr]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint64V(rv.Interface().(map[uintptr]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) { + fastpathTV.EncMapUintptrUintptrV(rv.Interface().(map[uintptr]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) { + fastpathTV.EncMapUintptrIntV(rv.Interface().(map[uintptr]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) 
EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt8R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt8V(rv.Interface().(map[uintptr]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt16R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt16V(rv.Interface().(map[uintptr]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt32R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt32V(rv.Interface().(map[uintptr]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt64R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt64V(rv.Interface().(map[uintptr]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrFloat32R(rv reflect.Value) { + fastpathTV.EncMapUintptrFloat32V(rv.Interface().(map[uintptr]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrFloat64R(rv reflect.Value) { + fastpathTV.EncMapUintptrFloat64V(rv.Interface().(map[uintptr]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrBoolR(rv reflect.Value) { + fastpathTV.EncMapUintptrBoolV(rv.Interface().(map[uintptr]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) 
EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntIntfR(rv reflect.Value) { + fastpathTV.EncMapIntIntfV(rv.Interface().(map[int]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntStringR(rv reflect.Value) { + fastpathTV.EncMapIntStringV(rv.Interface().(map[int]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUintR(rv reflect.Value) { + fastpathTV.EncMapIntUintV(rv.Interface().(map[int]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := 
range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint8R(rv reflect.Value) { + fastpathTV.EncMapIntUint8V(rv.Interface().(map[int]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint16R(rv reflect.Value) { + fastpathTV.EncMapIntUint16V(rv.Interface().(map[int]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint32R(rv reflect.Value) { + fastpathTV.EncMapIntUint32V(rv.Interface().(map[int]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint64R(rv reflect.Value) { + fastpathTV.EncMapIntUint64V(rv.Interface().(map[int]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder) { + ee := e.e + 
cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUintptrR(rv reflect.Value) { + fastpathTV.EncMapIntUintptrV(rv.Interface().(map[int]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntIntR(rv reflect.Value) { + fastpathTV.EncMapIntIntV(rv.Interface().(map[int]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt8R(rv reflect.Value) { + fastpathTV.EncMapIntInt8V(rv.Interface().(map[int]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt16R(rv reflect.Value) { + fastpathTV.EncMapIntInt16V(rv.Interface().(map[int]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt32R(rv reflect.Value) { + fastpathTV.EncMapIntInt32V(rv.Interface().(map[int]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt64R(rv reflect.Value) { + fastpathTV.EncMapIntInt64V(rv.Interface().(map[int]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntFloat32R(rv reflect.Value) { + fastpathTV.EncMapIntFloat32V(rv.Interface().(map[int]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + 
ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntFloat64R(rv reflect.Value) { + fastpathTV.EncMapIntFloat64V(rv.Interface().(map[int]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntBoolR(rv reflect.Value) { + fastpathTV.EncMapIntBoolV(rv.Interface().(map[int]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8IntfR(rv reflect.Value) { + fastpathTV.EncMapInt8IntfV(rv.Interface().(map[int8]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8StringR(rv reflect.Value) { + fastpathTV.EncMapInt8StringV(rv.Interface().(map[int8]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8UintR(rv reflect.Value) { + fastpathTV.EncMapInt8UintV(rv.Interface().(map[int8]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint8V(rv.Interface().(map[int8]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint16V(rv.Interface().(map[int8]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint32V(rv.Interface().(map[int8]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint64V(rv.Interface().(map[int8]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt8UintptrV(rv.Interface().(map[int8]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) { + fastpathTV.EncMapInt8IntV(rv.Interface().(map[int8]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int8R(rv reflect.Value) { + fastpathTV.EncMapInt8Int8V(rv.Interface().(map[int8]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) { + fastpathTV.EncMapInt8Int16V(rv.Interface().(map[int8]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) { + fastpathTV.EncMapInt8Int32V(rv.Interface().(map[int8]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, 
len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) { + fastpathTV.EncMapInt8Int64V(rv.Interface().(map[int8]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) { + fastpathTV.EncMapInt8Float32V(rv.Interface().(map[int8]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) { + fastpathTV.EncMapInt8Float64V(rv.Interface().(map[int8]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) { + fastpathTV.EncMapInt8BoolV(rv.Interface().(map[int8]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) { + fastpathTV.EncMapInt16IntfV(rv.Interface().(map[int16]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) { + fastpathTV.EncMapInt16StringV(rv.Interface().(map[int16]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) { + fastpathTV.EncMapInt16UintV(rv.Interface().(map[int16]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := 
range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint8V(rv.Interface().(map[int16]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint16V(rv.Interface().(map[int16]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint32V(rv.Interface().(map[int16]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } 
+ ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint64V(rv.Interface().(map[int16]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt16UintptrV(rv.Interface().(map[int16]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) { + fastpathTV.EncMapInt16IntV(rv.Interface().(map[int16]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) { + fastpathTV.EncMapInt16Int8V(rv.Interface().(map[int16]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i 
int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) { + fastpathTV.EncMapInt16Int16V(rv.Interface().(map[int16]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int32R(rv reflect.Value) { + fastpathTV.EncMapInt16Int32V(rv.Interface().(map[int16]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) { + fastpathTV.EncMapInt16Int64V(rv.Interface().(map[int16]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) { + fastpathTV.EncMapInt16Float32V(rv.Interface().(map[int16]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) { + fastpathTV.EncMapInt16Float64V(rv.Interface().(map[int16]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) { + fastpathTV.EncMapInt16BoolV(rv.Interface().(map[int16]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) { + fastpathTV.EncMapInt32IntfV(rv.Interface().(map[int32]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, 
len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) { + fastpathTV.EncMapInt32StringV(rv.Interface().(map[int32]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32UintR(rv reflect.Value) { + fastpathTV.EncMapInt32UintV(rv.Interface().(map[int32]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint8V(rv.Interface().(map[int32]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + 
} + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint16V(rv.Interface().(map[int32]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint32V(rv.Interface().(map[int32]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint64V(rv.Interface().(map[int32]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt32UintptrV(rv.Interface().(map[int32]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + 
if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) { + fastpathTV.EncMapInt32IntV(rv.Interface().(map[int32]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int8R(rv reflect.Value) { + fastpathTV.EncMapInt32Int8V(rv.Interface().(map[int32]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) { + fastpathTV.EncMapInt32Int16V(rv.Interface().(map[int32]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) { + fastpathTV.EncMapInt32Int32V(rv.Interface().(map[int32]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) { + fastpathTV.EncMapInt32Int64V(rv.Interface().(map[int32]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) { + fastpathTV.EncMapInt32Float32V(rv.Interface().(map[int32]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) { + fastpathTV.EncMapInt32Float64V(rv.Interface().(map[int32]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + 
ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) { + fastpathTV.EncMapInt32BoolV(rv.Interface().(map[int32]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64IntfR(rv reflect.Value) { + fastpathTV.EncMapInt64IntfV(rv.Interface().(map[int64]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) { + fastpathTV.EncMapInt64StringV(rv.Interface().(map[int64]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != 
nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) { + fastpathTV.EncMapInt64UintV(rv.Interface().(map[int64]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint8V(rv.Interface().(map[int64]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint16V(rv.Interface().(map[int64]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint32V(rv.Interface().(map[int64]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return 
+ } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint64V(rv.Interface().(map[int64]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt64UintptrV(rv.Interface().(map[int64]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) { + fastpathTV.EncMapInt64IntV(rv.Interface().(map[int64]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) { + fastpathTV.EncMapInt64Int8V(rv.Interface().(map[int64]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) { + fastpathTV.EncMapInt64Int16V(rv.Interface().(map[int64]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) { + fastpathTV.EncMapInt64Int32V(rv.Interface().(map[int64]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) { + fastpathTV.EncMapInt64Int64V(rv.Interface().(map[int64]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() 
+ return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) { + fastpathTV.EncMapInt64Float32V(rv.Interface().(map[int64]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) { + fastpathTV.EncMapInt64Float64V(rv.Interface().(map[int64]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) { + fastpathTV.EncMapInt64BoolV(rv.Interface().(map[int64]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } 
+ ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) { + fastpathTV.EncMapBoolIntfV(rv.Interface().(map[bool]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) { + fastpathTV.EncMapBoolStringV(rv.Interface().(map[bool]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) { + fastpathTV.EncMapBoolUintV(rv.Interface().(map[bool]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) { + fastpathTV.EncMapBoolUint8V(rv.Interface().(map[bool]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + 
v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) { + fastpathTV.EncMapBoolUint16V(rv.Interface().(map[bool]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) { + fastpathTV.EncMapBoolUint32V(rv.Interface().(map[bool]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) { + fastpathTV.EncMapBoolUint64V(rv.Interface().(map[bool]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) { + fastpathTV.EncMapBoolUintptrV(rv.Interface().(map[bool]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) { + fastpathTV.EncMapBoolIntV(rv.Interface().(map[bool]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) { + fastpathTV.EncMapBoolInt8V(rv.Interface().(map[bool]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) { + fastpathTV.EncMapBoolInt16V(rv.Interface().(map[bool]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + 
sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) { + fastpathTV.EncMapBoolInt32V(rv.Interface().(map[bool]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) { + fastpathTV.EncMapBoolInt64V(rv.Interface().(map[bool]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) { + fastpathTV.EncMapBoolFloat32V(rv.Interface().(map[bool]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) 
fastpathEncMapBoolFloat64R(rv reflect.Value) { + fastpathTV.EncMapBoolFloat64V(rv.Interface().(map[bool]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) { + fastpathTV.EncMapBoolBoolV(rv.Interface().(map[bool]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { + + case []interface{}: + fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, d) + case *[]interface{}: + v2, changed2 := fastpathTV.DecSliceIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]interface{}: + fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]interface{}: + v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]string: + fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]string: + v2, changed2 := fastpathTV.DecMapIntfStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint: + fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint: + v2, changed2 := fastpathTV.DecMapIntfUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint8: + fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint8: + v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint16: + fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint16: + v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case 
map[interface{}]uint32: + fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint32: + v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint64: + fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint64: + v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uintptr: + fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uintptr: + v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int: + fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int: + v2, changed2 := fastpathTV.DecMapIntfIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int8: + fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int8: + v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int16: + fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int16: + v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int32: + fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int32: + v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int64: + fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int64: + v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]float32: + fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]float32: + v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]float64: + fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]float64: + v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]bool: + fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]bool: + v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []string: + fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, d) + case *[]string: + v2, changed2 := fastpathTV.DecSliceStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]interface{}: + fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, d) + case *map[string]interface{}: + v2, changed2 := fastpathTV.DecMapStringIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]string: + fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, d) + case *map[string]string: + v2, changed2 := fastpathTV.DecMapStringStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint: + fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, d) + case *map[string]uint: + v2, changed2 := fastpathTV.DecMapStringUintV(*v, fastpathCheckNilFalse, true, 
d) + if changed2 { + *v = v2 + } + + case map[string]uint8: + fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint8: + v2, changed2 := fastpathTV.DecMapStringUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint16: + fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint16: + v2, changed2 := fastpathTV.DecMapStringUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint32: + fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint32: + v2, changed2 := fastpathTV.DecMapStringUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint64: + fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint64: + v2, changed2 := fastpathTV.DecMapStringUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uintptr: + fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[string]uintptr: + v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int: + fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, d) + case *map[string]int: + v2, changed2 := fastpathTV.DecMapStringIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int8: + fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, d) + case *map[string]int8: + v2, changed2 := fastpathTV.DecMapStringInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int16: + fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, d) + case *map[string]int16: + v2, changed2 := fastpathTV.DecMapStringInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int32: + fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, d) + case *map[string]int32: + v2, changed2 := fastpathTV.DecMapStringInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int64: + fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, d) + case *map[string]int64: + v2, changed2 := fastpathTV.DecMapStringInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]float32: + fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[string]float32: + v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]float64: + fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[string]float64: + v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]bool: + fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, d) + case *map[string]bool: + v2, changed2 := fastpathTV.DecMapStringBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []float32: + fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, d) + case *[]float32: + v2, changed2 := fastpathTV.DecSliceFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]interface{}: + fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, d) + case *map[float32]interface{}: + v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, fastpathCheckNilFalse, true, d) + if 
changed2 { + *v = v2 + } + + case map[float32]string: + fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, d) + case *map[float32]string: + v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint: + fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint: + v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint8: + fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint8: + v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint16: + fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint16: + v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint32: + fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint32: + v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint64: + fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint64: + v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uintptr: + fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[float32]uintptr: + v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int: + fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, d) + case *map[float32]int: + v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int8: + fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int8: + v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int16: + fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int16: + v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int32: + fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int32: + v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int64: + fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int64: + v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]float32: + fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, d) + case *map[float32]float32: + v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]float64: + fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, d) + case *map[float32]float64: + v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]bool: + fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, d) + case *map[float32]bool: + v2, changed2 := 
fastpathTV.DecMapFloat32BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []float64: + fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, d) + case *[]float64: + v2, changed2 := fastpathTV.DecSliceFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]interface{}: + fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, d) + case *map[float64]interface{}: + v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]string: + fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, d) + case *map[float64]string: + v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint: + fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint: + v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint8: + fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint8: + v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint16: + fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint16: + v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint32: + fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint32: + v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint64: + fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint64: + v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uintptr: + fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[float64]uintptr: + v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int: + fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, d) + case *map[float64]int: + v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int8: + fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int8: + v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int16: + fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int16: + v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int32: + fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int32: + v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int64: + fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int64: + v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]float32: + fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, d) + case 
*map[float64]float32: + v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]float64: + fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, d) + case *map[float64]float64: + v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]bool: + fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, d) + case *map[float64]bool: + v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint: + fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, d) + case *[]uint: + v2, changed2 := fastpathTV.DecSliceUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]interface{}: + fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint]interface{}: + v2, changed2 := fastpathTV.DecMapUintIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]string: + fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, d) + case *map[uint]string: + v2, changed2 := fastpathTV.DecMapUintStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint: + fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint: + v2, changed2 := fastpathTV.DecMapUintUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint8: + fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint8: + v2, changed2 := fastpathTV.DecMapUintUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint16: + fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint16: + v2, changed2 := fastpathTV.DecMapUintUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint32: + fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint32: + v2, changed2 := fastpathTV.DecMapUintUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint64: + fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint64: + v2, changed2 := fastpathTV.DecMapUintUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uintptr: + fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint]uintptr: + v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int: + fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, d) + case *map[uint]int: + v2, changed2 := fastpathTV.DecMapUintIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int8: + fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int8: + v2, changed2 := fastpathTV.DecMapUintInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int16: + fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int16: + v2, changed2 := fastpathTV.DecMapUintInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int32: + fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int32: + v2, changed2 := fastpathTV.DecMapUintInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { 
+ *v = v2 + } + + case map[uint]int64: + fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int64: + v2, changed2 := fastpathTV.DecMapUintInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]float32: + fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[uint]float32: + v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]float64: + fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[uint]float64: + v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]bool: + fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint]bool: + v2, changed2 := fastpathTV.DecMapUintBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]interface{}: + fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]interface{}: + v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]string: + fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]string: + v2, changed2 := fastpathTV.DecMapUint8StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint: + fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint: + v2, changed2 := fastpathTV.DecMapUint8UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint8: + fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint8: + v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint16: + fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint16: + v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint32: + fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint32: + v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint64: + fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint64: + v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uintptr: + fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uintptr: + v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int: + fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int: + v2, changed2 := fastpathTV.DecMapUint8IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int8: + fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int8: + v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int16: + fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int16: + v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int32: + fastpathTV.DecMapUint8Int32V(v, 
fastpathCheckNilFalse, false, d) + case *map[uint8]int32: + v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int64: + fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int64: + v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]float32: + fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]float32: + v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]float64: + fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]float64: + v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]bool: + fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]bool: + v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint16: + fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, d) + case *[]uint16: + v2, changed2 := fastpathTV.DecSliceUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]interface{}: + fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]interface{}: + v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]string: + fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]string: + v2, changed2 := fastpathTV.DecMapUint16StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint: + fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint: + v2, changed2 := fastpathTV.DecMapUint16UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint8: + fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint8: + v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint16: + fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint16: + v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint32: + fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint32: + v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint64: + fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint64: + v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uintptr: + fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uintptr: + v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int: + fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int: + v2, changed2 := fastpathTV.DecMapUint16IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int8: + fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, d) + 
case *map[uint16]int8: + v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int16: + fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int16: + v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int32: + fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int32: + v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int64: + fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int64: + v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]float32: + fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]float32: + v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]float64: + fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]float64: + v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]bool: + fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]bool: + v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint32: + fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, d) + case *[]uint32: + v2, changed2 := fastpathTV.DecSliceUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]interface{}: + fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]interface{}: + v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]string: + fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]string: + v2, changed2 := fastpathTV.DecMapUint32StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint: + fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint: + v2, changed2 := fastpathTV.DecMapUint32UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint8: + fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint8: + v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint16: + fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint16: + v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint32: + fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint32: + v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint64: + fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint64: + v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uintptr: + fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, d) + case 
*map[uint32]uintptr: + v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int: + fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int: + v2, changed2 := fastpathTV.DecMapUint32IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int8: + fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int8: + v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int16: + fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int16: + v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int32: + fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int32: + v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int64: + fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int64: + v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]float32: + fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]float32: + v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]float64: + fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]float64: + v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]bool: + fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]bool: + v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint64: + fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, d) + case *[]uint64: + v2, changed2 := fastpathTV.DecSliceUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]interface{}: + fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]interface{}: + v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]string: + fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]string: + v2, changed2 := fastpathTV.DecMapUint64StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint: + fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint: + v2, changed2 := fastpathTV.DecMapUint64UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint8: + fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint8: + v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint16: + fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint16: + v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint32: + fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint32: + v2, 
changed2 := fastpathTV.DecMapUint64Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint64: + fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint64: + v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uintptr: + fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uintptr: + v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int: + fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int: + v2, changed2 := fastpathTV.DecMapUint64IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int8: + fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int8: + v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int16: + fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int16: + v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int32: + fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int32: + v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int64: + fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int64: + v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]float32: + fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]float32: + v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]float64: + fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]float64: + v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]bool: + fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]bool: + v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uintptr: + fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, d) + case *[]uintptr: + v2, changed2 := fastpathTV.DecSliceUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]interface{}: + fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]interface{}: + v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]string: + fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]string: + v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint: + fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint: + v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint8: + fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint8: + v2, changed2 
:= fastpathTV.DecMapUintptrUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint16: + fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint16: + v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint32: + fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint32: + v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint64: + fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint64: + v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uintptr: + fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uintptr: + v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int: + fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int: + v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int8: + fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int8: + v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int16: + fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int16: + v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int32: + fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int32: + v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int64: + fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int64: + v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]float32: + fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]float32: + v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]float64: + fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]float64: + v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]bool: + fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]bool: + v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int: + fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, d) + case *[]int: + v2, changed2 := fastpathTV.DecSliceIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]interface{}: + fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, d) + case *map[int]interface{}: + v2, changed2 := fastpathTV.DecMapIntIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]string: + fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, d) + case *map[int]string: + v2, changed2 := 
fastpathTV.DecMapIntStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint: + fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, d) + case *map[int]uint: + v2, changed2 := fastpathTV.DecMapIntUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint8: + fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint8: + v2, changed2 := fastpathTV.DecMapIntUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint16: + fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint16: + v2, changed2 := fastpathTV.DecMapIntUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint32: + fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint32: + v2, changed2 := fastpathTV.DecMapIntUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint64: + fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint64: + v2, changed2 := fastpathTV.DecMapIntUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uintptr: + fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int]uintptr: + v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int: + fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, d) + case *map[int]int: + v2, changed2 := fastpathTV.DecMapIntIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int8: + fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, d) + case *map[int]int8: + v2, changed2 := fastpathTV.DecMapIntInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int16: + fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, d) + case *map[int]int16: + v2, changed2 := fastpathTV.DecMapIntInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int32: + fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, d) + case *map[int]int32: + v2, changed2 := fastpathTV.DecMapIntInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int64: + fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, d) + case *map[int]int64: + v2, changed2 := fastpathTV.DecMapIntInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]float32: + fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[int]float32: + v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]float64: + fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[int]float64: + v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]bool: + fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, d) + case *map[int]bool: + v2, changed2 := fastpathTV.DecMapIntBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int8: + fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, d) + case *[]int8: + v2, changed2 := fastpathTV.DecSliceInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]interface{}: + fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, d) + case 
*map[int8]interface{}: + v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]string: + fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, d) + case *map[int8]string: + v2, changed2 := fastpathTV.DecMapInt8StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint: + fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint: + v2, changed2 := fastpathTV.DecMapInt8UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint8: + fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint8: + v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint16: + fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint16: + v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint32: + fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint32: + v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint64: + fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint64: + v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uintptr: + fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int8]uintptr: + v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int: + fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, d) + case *map[int8]int: + v2, changed2 := fastpathTV.DecMapInt8IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int8: + fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int8: + v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int16: + fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int16: + v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int32: + fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int32: + v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int64: + fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int64: + v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]float32: + fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int8]float32: + v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]float64: + fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, d) + case *map[int8]float64: + v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]bool: + fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int8]bool: + v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 
+ } + + case []int16: + fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, d) + case *[]int16: + v2, changed2 := fastpathTV.DecSliceInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]interface{}: + fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, d) + case *map[int16]interface{}: + v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]string: + fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, d) + case *map[int16]string: + v2, changed2 := fastpathTV.DecMapInt16StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint: + fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint: + v2, changed2 := fastpathTV.DecMapInt16UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint8: + fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint8: + v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint16: + fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint16: + v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint32: + fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint32: + v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint64: + fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint64: + v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uintptr: + fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int16]uintptr: + v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int: + fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, d) + case *map[int16]int: + v2, changed2 := fastpathTV.DecMapInt16IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int8: + fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int8: + v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int16: + fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int16: + v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int32: + fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int32: + v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int64: + fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int64: + v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]float32: + fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int16]float32: + v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]float64: + fastpathTV.DecMapInt16Float64V(v, 
fastpathCheckNilFalse, false, d) + case *map[int16]float64: + v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]bool: + fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int16]bool: + v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int32: + fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, d) + case *[]int32: + v2, changed2 := fastpathTV.DecSliceInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]interface{}: + fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, d) + case *map[int32]interface{}: + v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]string: + fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, d) + case *map[int32]string: + v2, changed2 := fastpathTV.DecMapInt32StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint: + fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint: + v2, changed2 := fastpathTV.DecMapInt32UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint8: + fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint8: + v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint16: + fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint16: + v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint32: + fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint32: + v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint64: + fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint64: + v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uintptr: + fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int32]uintptr: + v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int: + fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, d) + case *map[int32]int: + v2, changed2 := fastpathTV.DecMapInt32IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int8: + fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int8: + v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int16: + fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int16: + v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int32: + fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int32: + v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int64: + fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int64: + v2, changed2 := 
fastpathTV.DecMapInt32Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]float32: + fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int32]float32: + v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]float64: + fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, d) + case *map[int32]float64: + v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]bool: + fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int32]bool: + v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int64: + fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, d) + case *[]int64: + v2, changed2 := fastpathTV.DecSliceInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]interface{}: + fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, d) + case *map[int64]interface{}: + v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]string: + fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, d) + case *map[int64]string: + v2, changed2 := fastpathTV.DecMapInt64StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint: + fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint: + v2, changed2 := fastpathTV.DecMapInt64UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint8: + fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint8: + v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint16: + fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint16: + v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint32: + fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint32: + v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint64: + fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint64: + v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uintptr: + fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int64]uintptr: + v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int: + fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, d) + case *map[int64]int: + v2, changed2 := fastpathTV.DecMapInt64IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int8: + fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int8: + v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int16: + fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int16: + v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { 
+ *v = v2 + } + + case map[int64]int32: + fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int32: + v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int64: + fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int64: + v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]float32: + fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int64]float32: + v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]float64: + fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, d) + case *map[int64]float64: + v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]bool: + fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int64]bool: + v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []bool: + fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, d) + case *[]bool: + v2, changed2 := fastpathTV.DecSliceBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]interface{}: + fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, d) + case *map[bool]interface{}: + v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]string: + fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, d) + case *map[bool]string: + v2, changed2 := fastpathTV.DecMapBoolStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uint: + fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, d) + case *map[bool]uint: + v2, changed2 := fastpathTV.DecMapBoolUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uint8: + fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, d) + case *map[bool]uint8: + v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uint16: + fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, d) + case *map[bool]uint16: + v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uint32: + fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, d) + case *map[bool]uint32: + v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uint64: + fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, d) + case *map[bool]uint64: + v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uintptr: + fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[bool]uintptr: + v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]int: + fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, d) + case *map[bool]int: + v2, changed2 := fastpathTV.DecMapBoolIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]int8: + fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, d) + case 
*map[bool]int8: + v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]int16: + fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, d) + case *map[bool]int16: + v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]int32: + fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, d) + case *map[bool]int32: + v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]int64: + fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, d) + case *map[bool]int64: + v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]float32: + fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[bool]float32: + v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]float64: + fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[bool]float64: + v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]bool: + fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, d) + case *map[bool]bool: + v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions + +func (f *decFnInfo) fastpathDecSliceIntfR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]interface{}) + v, changed := fastpathTV.DecSliceIntfV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]interface{}) + fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceIntfX(vp *[]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecSliceIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, d *Decoder) (_ []interface{}, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []interface{}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]interface{}, xlen) + } + } else { + v = make([]interface{}, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + d.decode(&v[j]) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, nil) + slh.ElemContainerState(j) + d.decode(&v[j]) + } + } else if !canChange { + for ; j < containerLenS; j++ { + 
slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []interface{}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]interface{}, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, nil) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + d.decode(&v[j]) + + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceStringR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]string) + v, changed := fastpathTV.DecSliceStringV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]string) + fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceStringX(vp *[]string, checkNil bool, d *Decoder) { + v, changed := f.DecSliceStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d *Decoder) (_ []string, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []string{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]string, xlen) + } + } else { + v = make([]string, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeString() + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, "") + slh.ElemContainerState(j) + v[j] = dd.DecodeString() + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []string{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]string, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, "") + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeString() + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceFloat32R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]float32) + v, changed := fastpathTV.DecSliceFloat32V(*vp, fastpathCheckNilFalse, 
!array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]float32) + fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceFloat32X(vp *[]float32, checkNil bool, d *Decoder) { + v, changed := f.DecSliceFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, d *Decoder) (_ []float32, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float32, xlen) + } + } else { + v = make([]float32, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = float32(dd.DecodeFloat(true)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = float32(dd.DecodeFloat(true)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []float32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]float32, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = float32(dd.DecodeFloat(true)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceFloat64R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]float64) + v, changed := fastpathTV.DecSliceFloat64V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]float64) + fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceFloat64X(vp *[]float64, checkNil bool, d *Decoder) { + v, changed := f.DecSliceFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, d *Decoder) (_ []float64, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen 
int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float64, xlen) + } + } else { + v = make([]float64, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeFloat(false) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = dd.DecodeFloat(false) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]float64, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeFloat(false) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUintR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint) + v, changed := fastpathTV.DecSliceUintV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint) + fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUintX(vp *[]uint, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Decoder) (_ []uint, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint, xlen) + } + } else { + v = make([]uint, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uint(dd.DecodeUint(uintBitsize)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uint(dd.DecodeUint(uintBitsize)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = 
make([]uint, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = uint(dd.DecodeUint(uintBitsize)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUint16R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint16) + v, changed := fastpathTV.DecSliceUint16V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint16) + fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUint16X(vp *[]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d *Decoder) (_ []uint16, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint16, xlen) + } + } else { + v = make([]uint16, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uint16(dd.DecodeUint(16)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uint16(dd.DecodeUint(16)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]uint16, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = uint16(dd.DecodeUint(16)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUint32R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint32) + v, changed := fastpathTV.DecSliceUint32V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint32) + fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUint32X(vp *[]uint32, checkNil bool, d *Decoder) { + v, changed := 
f.DecSliceUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d *Decoder) (_ []uint32, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint32, xlen) + } + } else { + v = make([]uint32, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uint32(dd.DecodeUint(32)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uint32(dd.DecodeUint(32)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]uint32, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = uint32(dd.DecodeUint(32)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUint64R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint64) + v, changed := fastpathTV.DecSliceUint64V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint64) + fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUint64X(vp *[]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d *Decoder) (_ []uint64, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint64, xlen) + } + } else { + v = make([]uint64, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = 
len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeUint(64) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = dd.DecodeUint(64) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]uint64, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeUint(64) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUintptrR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uintptr) + v, changed := fastpathTV.DecSliceUintptrV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uintptr) + fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, d *Decoder) (_ []uintptr, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uintptr{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uintptr, xlen) + } + } else { + v = make([]uintptr, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uintptr{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]uintptr, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = 
uintptr(dd.DecodeUint(uintBitsize)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceIntR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int) + v, changed := fastpathTV.DecSliceIntV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int) + fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceIntX(vp *[]int, checkNil bool, d *Decoder) { + v, changed := f.DecSliceIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decoder) (_ []int, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int, xlen) + } + } else { + v = make([]int, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int(dd.DecodeInt(intBitsize)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int(dd.DecodeInt(intBitsize)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]int, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int(dd.DecodeInt(intBitsize)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt8R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int8) + v, changed := fastpathTV.DecSliceInt8V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int8) + fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt8X(vp *[]int8, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Decoder) (_ []int8, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + 
slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int8, xlen) + } + } else { + v = make([]int8, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int8(dd.DecodeInt(8)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int8(dd.DecodeInt(8)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]int8, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int8(dd.DecodeInt(8)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt16R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int16) + v, changed := fastpathTV.DecSliceInt16V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int16) + fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt16X(vp *[]int16, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *Decoder) (_ []int16, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int16, xlen) + } + } else { + v = make([]int16, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int16(dd.DecodeInt(16)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int16(dd.DecodeInt(16)) + } + } else if 
!canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]int16, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int16(dd.DecodeInt(16)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt32R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int32) + v, changed := fastpathTV.DecSliceInt32V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int32) + fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt32X(vp *[]int32, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *Decoder) (_ []int32, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int32, xlen) + } + } else { + v = make([]int32, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int32(dd.DecodeInt(32)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int32(dd.DecodeInt(32)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]int32, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int32(dd.DecodeInt(32)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt64R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int64) + v, changed := 
fastpathTV.DecSliceInt64V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int64) + fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt64X(vp *[]int64, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *Decoder) (_ []int64, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int64, xlen) + } + } else { + v = make([]int64, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeInt(64) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = dd.DecodeInt(64) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]int64, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeInt(64) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceBoolR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]bool) + v, changed := fastpathTV.DecSliceBoolV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]bool) + fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceBoolX(vp *[]bool, checkNil bool, d *Decoder) { + v, changed := f.DecSliceBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Decoder) (_ []bool, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []bool{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, 
d.h.MaxInitLen, 1) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]bool, xlen) + } + } else { + v = make([]bool, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeBool() + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, false) + slh.ElemContainerState(j) + v[j] = dd.DecodeBool() + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []bool{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]bool, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, false) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeBool() + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]interface{}) + v, changed := fastpathTV.DecMapIntfIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]interface{}) + fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk interface{} + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]string) + v, changed := 
fastpathTV.DecMapIntfStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]string) + fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]string, xlen) + changed = true + } + + var mk interface{} + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint) + v, changed := fastpathTV.DecMapIntfUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint) + fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint, xlen) + changed = true + } + + var mk interface{} + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + 
+func (f *decFnInfo) fastpathDecMapIntfUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint8) + v, changed := fastpathTV.DecMapIntfUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint8) + fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]uint8, xlen) + changed = true + } + + var mk interface{} + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint16) + v, changed := fastpathTV.DecMapIntfUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint16) + fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]uint16, xlen) + changed = true + } + + var mk interface{} + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint32) + v, changed := fastpathTV.DecMapIntfUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint32) + fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]uint32, xlen) + changed = true + } + + var mk interface{} + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint64) + v, changed := fastpathTV.DecMapIntfUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint64) + fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint64, xlen) + changed = true + } + + var mk interface{} + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; 
!dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uintptr) + v, changed := fastpathTV.DecMapIntfUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uintptr) + fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uintptr, xlen) + changed = true + } + + var mk interface{} + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int) + v, changed := fastpathTV.DecMapIntfIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int) + fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]int, xlen) + changed = true + } + + var mk interface{} + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if 
cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int8) + v, changed := fastpathTV.DecMapIntfInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int8) + fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]int8, xlen) + changed = true + } + + var mk interface{} + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int16) + v, changed := fastpathTV.DecMapIntfInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int16) + fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]int16, xlen) + changed = true + } + + var mk interface{} + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if 
cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int32) + v, changed := fastpathTV.DecMapIntfInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int32) + fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]int32, xlen) + changed = true + } + + var mk interface{} + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int64) + v, changed := fastpathTV.DecMapIntfInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int64) + fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = 
make(map[interface{}]int64, xlen) + changed = true + } + + var mk interface{} + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]float32) + v, changed := fastpathTV.DecMapIntfFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]float32) + fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]float32, xlen) + changed = true + } + + var mk interface{} + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]float64) + v, changed := fastpathTV.DecMapIntfFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]float64) + fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && 
dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]float64, xlen) + changed = true + } + + var mk interface{} + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]bool) + v, changed := fastpathTV.DecMapIntfBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]bool) + fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]bool, xlen) + changed = true + } + + var mk interface{} + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]interface{}) + v, changed := fastpathTV.DecMapStringIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]interface{}) + fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntfV(v 
map[string]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[string]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk string + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]string) + v, changed := fastpathTV.DecMapStringStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]string) + fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool, + d *Decoder) (_ map[string]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]string, xlen) + changed = true + } + + var mk string + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint) + v, changed := fastpathTV.DecMapStringUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint) + fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) 
DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint, xlen) + changed = true + } + + var mk string + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint8) + v, changed := fastpathTV.DecMapStringUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint8) + fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]uint8, xlen) + changed = true + } + + var mk string + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint16) + v, changed := fastpathTV.DecMapStringUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint16) + fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint16, changed 
bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]uint16, xlen) + changed = true + } + + var mk string + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint32) + v, changed := fastpathTV.DecMapStringUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint32) + fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]uint32, xlen) + changed = true + } + + var mk string + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint64) + v, changed := fastpathTV.DecMapStringUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint64) + fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = 
true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint64, xlen) + changed = true + } + + var mk string + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uintptr) + v, changed := fastpathTV.DecMapStringUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uintptr) + fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uintptr, xlen) + changed = true + } + + var mk string + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int) + v, changed := fastpathTV.DecMapStringIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int) + fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := 
decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int, xlen) + changed = true + } + + var mk string + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int8) + v, changed := fastpathTV.DecMapStringInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int8) + fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]int8, xlen) + changed = true + } + + var mk string + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int16) + v, changed := fastpathTV.DecMapStringInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int16) + fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]int16, xlen) + changed = true + } + + var mk string + var mv int16 + if 
containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int32) + v, changed := fastpathTV.DecMapStringInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int32) + fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]int32, xlen) + changed = true + } + + var mk string + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int64) + v, changed := fastpathTV.DecMapStringInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int64) + fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int64, xlen) + changed = true + } + + var mk string + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]float32) + v, changed := fastpathTV.DecMapStringFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]float32) + fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[string]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]float32, xlen) + changed = true + } + + var mk string + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]float64) + v, changed := fastpathTV.DecMapStringFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]float64) + fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[string]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]float64, xlen) + changed = true + } + + var mk string + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv 
= dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]bool) + v, changed := fastpathTV.DecMapStringBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]bool) + fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[string]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]bool, xlen) + changed = true + } + + var mk string + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]interface{}) + v, changed := fastpathTV.DecMapFloat32IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]interface{}) + fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk float32 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + 
d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]string) + v, changed := fastpathTV.DecMapFloat32StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]string) + fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]string, xlen) + changed = true + } + + var mk float32 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint) + v, changed := fastpathTV.DecMapFloat32UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint) + fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint, xlen) + changed = true + } + + var mk float32 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil 
{ + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint8) + v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint8) + fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]uint8, xlen) + changed = true + } + + var mk float32 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint16) + v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint16) + fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]uint16, xlen) + changed = true + } + + var mk float32 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if 
containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint32) + v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint32) + fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]uint32, xlen) + changed = true + } + + var mk float32 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint64) + v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint64) + fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint64, xlen) + changed = true + } + + var mk float32 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; 
!dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uintptr) + v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uintptr) + fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uintptr, xlen) + changed = true + } + + var mk float32 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int) + v, changed := fastpathTV.DecMapFloat32IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int) + fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int, xlen) + changed = true + } + + var mk float32 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil 
{ + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int8) + v, changed := fastpathTV.DecMapFloat32Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int8) + fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]int8, xlen) + changed = true + } + + var mk float32 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int16) + v, changed := fastpathTV.DecMapFloat32Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int16) + fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]int16, xlen) + changed = true + } + + var mk float32 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int32) + v, changed := fastpathTV.DecMapFloat32Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int32) + fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]int32, xlen) + changed = true + } + + var mk float32 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int64) + v, changed := fastpathTV.DecMapFloat32Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int64) + fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int64, xlen) + changed = true + } + + var mk float32 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]float32) + v, changed := fastpathTV.DecMapFloat32Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]float32) + fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]float32, xlen) + changed = true + } + + var mk float32 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]float64) + v, changed := fastpathTV.DecMapFloat32Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]float64) + fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]float64, xlen) + changed = true + } + + var mk float32 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]bool) + v, changed := fastpathTV.DecMapFloat32BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]bool) + fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]bool, xlen) + changed = true + } + + var mk float32 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]interface{}) + v, changed := fastpathTV.DecMapFloat64IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]interface{}) + fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk float64 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + 
if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]string) + v, changed := fastpathTV.DecMapFloat64StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]string) + fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]string, xlen) + changed = true + } + + var mk float64 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint) + v, changed := fastpathTV.DecMapFloat64UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint) + fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint, xlen) + changed = true + } + + var mk float64 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + 
} + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint8) + v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint8) + fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]uint8, xlen) + changed = true + } + + var mk float64 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint16) + v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint16) + fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]uint16, xlen) + changed = true + } + + var mk float64 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } 
+ } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint32) + v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint32) + fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]uint32, xlen) + changed = true + } + + var mk float64 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint64) + v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint64) + fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint64, xlen) + changed = true + } + + var mk float64 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return 
v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uintptr) + v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uintptr) + fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uintptr, xlen) + changed = true + } + + var mk float64 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int) + v, changed := fastpathTV.DecMapFloat64IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int) + fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int, xlen) + changed = true + } + + var mk float64 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) 
fastpathDecMapFloat64Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int8) + v, changed := fastpathTV.DecMapFloat64Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int8) + fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]int8, xlen) + changed = true + } + + var mk float64 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int16) + v, changed := fastpathTV.DecMapFloat64Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int16) + fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]int16, xlen) + changed = true + } + + var mk float64 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := 
rv.Addr().Interface().(*map[float64]int32) + v, changed := fastpathTV.DecMapFloat64Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int32) + fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]int32, xlen) + changed = true + } + + var mk float64 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int64) + v, changed := fastpathTV.DecMapFloat64Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int64) + fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int64, xlen) + changed = true + } + + var mk float64 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]float32) + v, changed := fastpathTV.DecMapFloat64Float32V(*vp, 
fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]float32) + fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]float32, xlen) + changed = true + } + + var mk float64 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]float64) + v, changed := fastpathTV.DecMapFloat64Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]float64) + fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]float64, xlen) + changed = true + } + + var mk float64 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]bool) + v, changed := fastpathTV.DecMapFloat64BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if 
changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]bool) + fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]bool, xlen) + changed = true + } + + var mk float64 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]interface{}) + v, changed := fastpathTV.DecMapUintIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]interface{}) + fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]string) + v, changed := fastpathTV.DecMapUintStringV(*vp, 
fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]string) + fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintStringX(vp *map[uint]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]string, xlen) + changed = true + } + + var mk uint + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint) + v, changed := fastpathTV.DecMapUintUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint) + fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint, xlen) + changed = true + } + + var mk uint + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint8) + v, changed := fastpathTV.DecMapUintUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint8) + 
fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]uint8, xlen) + changed = true + } + + var mk uint + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint16) + v, changed := fastpathTV.DecMapUintUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint16) + fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]uint16, xlen) + changed = true + } + + var mk uint + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint32) + v, changed := fastpathTV.DecMapUintUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint32) + fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) 
DecMapUintUint32X(vp *map[uint]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]uint32, xlen) + changed = true + } + + var mk uint + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint64) + v, changed := fastpathTV.DecMapUintUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint64) + fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint64, xlen) + changed = true + } + + var mk uint + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uintptr) + v, changed := fastpathTV.DecMapUintUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uintptr) + fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, checkNil bool, d *Decoder) { + v, changed := 
f.DecMapUintUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uintptr, xlen) + changed = true + } + + var mk uint + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int) + v, changed := fastpathTV.DecMapUintIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int) + fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintIntX(vp *map[uint]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int, xlen) + changed = true + } + + var mk uint + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int8) + v, changed := fastpathTV.DecMapUintInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int8) + fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt8V(v 
map[uint]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]int8, xlen) + changed = true + } + + var mk uint + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int16) + v, changed := fastpathTV.DecMapUintInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int16) + fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]int16, xlen) + changed = true + } + + var mk uint + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int32) + v, changed := fastpathTV.DecMapUintInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int32) + fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil 
&& dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]int32, xlen) + changed = true + } + + var mk uint + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int64) + v, changed := fastpathTV.DecMapUintInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int64) + fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int64, xlen) + changed = true + } + + var mk uint + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]float32) + v, changed := fastpathTV.DecMapUintFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]float32) + fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := 
dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]float32, xlen) + changed = true + } + + var mk uint + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]float64) + v, changed := fastpathTV.DecMapUintFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]float64) + fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]float64, xlen) + changed = true + } + + var mk uint + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]bool) + v, changed := fastpathTV.DecMapUintBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]bool) + fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 
9) + v = make(map[uint]bool, xlen) + changed = true + } + + var mk uint + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]interface{}) + v, changed := fastpathTV.DecMapUint8IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]interface{}) + fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint8 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]string) + v, changed := fastpathTV.DecMapUint8StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]string) + fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := 
decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]string, xlen) + changed = true + } + + var mk uint8 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint) + v, changed := fastpathTV.DecMapUint8UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint) + fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint, xlen) + changed = true + } + + var mk uint8 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint8) + v, changed := fastpathTV.DecMapUint8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint8) + fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]uint8, xlen) + changed = true + } + + var mk uint8 + var mv uint8 + if 
containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint16) + v, changed := fastpathTV.DecMapUint8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint16) + fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]uint16, xlen) + changed = true + } + + var mk uint8 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint32) + v, changed := fastpathTV.DecMapUint8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint32) + fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]uint32, xlen) + changed = true + } + + var mk uint8 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint64) + v, changed := fastpathTV.DecMapUint8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint64) + fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint64, xlen) + changed = true + } + + var mk uint8 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uintptr) + v, changed := fastpathTV.DecMapUint8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uintptr) + fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uintptr, xlen) + changed = true + } + + var mk uint8 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int) + v, changed := fastpathTV.DecMapUint8IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int) + fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int, xlen) + changed = true + } + + var mk uint8 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int8) + v, changed := fastpathTV.DecMapUint8Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int8) + fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]int8, xlen) + changed = true + } + + var mk uint8 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + 
for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int16) + v, changed := fastpathTV.DecMapUint8Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int16) + fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]int16, xlen) + changed = true + } + + var mk uint8 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int32) + v, changed := fastpathTV.DecMapUint8Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int32) + fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]int32, xlen) + changed = true + } + + var mk uint8 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != 
nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int64) + v, changed := fastpathTV.DecMapUint8Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int64) + fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int64, xlen) + changed = true + } + + var mk uint8 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]float32) + v, changed := fastpathTV.DecMapUint8Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]float32) + fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]float32, xlen) + changed = true + } + + var mk uint8 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } 
+ } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]float64) + v, changed := fastpathTV.DecMapUint8Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]float64) + fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]float64, xlen) + changed = true + } + + var mk uint8 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]bool) + v, changed := fastpathTV.DecMapUint8BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]bool) + fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]bool, xlen) + changed = true + } + + var mk uint8 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16IntfR(rv 
reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]interface{}) + v, changed := fastpathTV.DecMapUint16IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]interface{}) + fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint16 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]string) + v, changed := fastpathTV.DecMapUint16StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]string) + fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]string, xlen) + changed = true + } + + var mk uint16 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, 
changed +} + +func (f *decFnInfo) fastpathDecMapUint16UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint) + v, changed := fastpathTV.DecMapUint16UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint) + fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uint, xlen) + changed = true + } + + var mk uint16 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint8) + v, changed := fastpathTV.DecMapUint16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint8) + fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]uint8, xlen) + changed = true + } + + var mk uint16 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint16R(rv reflect.Value) { + if rv.CanAddr() 
{ + vp := rv.Addr().Interface().(*map[uint16]uint16) + v, changed := fastpathTV.DecMapUint16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint16) + fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]uint16, xlen) + changed = true + } + + var mk uint16 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint32) + v, changed := fastpathTV.DecMapUint16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint32) + fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]uint32, xlen) + changed = true + } + + var mk uint16 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint64) + v, changed := 
fastpathTV.DecMapUint16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint64) + fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uint64, xlen) + changed = true + } + + var mk uint16 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uintptr) + v, changed := fastpathTV.DecMapUint16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uintptr) + fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uintptr, xlen) + changed = true + } + + var mk uint16 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int) + v, changed := fastpathTV.DecMapUint16IntV(*vp, fastpathCheckNilFalse, 
true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int) + fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int, xlen) + changed = true + } + + var mk uint16 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int8) + v, changed := fastpathTV.DecMapUint16Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int8) + fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]int8, xlen) + changed = true + } + + var mk uint16 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int16) + v, changed := fastpathTV.DecMapUint16Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int16) + fastpathTV.DecMapUint16Int16V(v, 
fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]int16, xlen) + changed = true + } + + var mk uint16 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int32) + v, changed := fastpathTV.DecMapUint16Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int32) + fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]int32, xlen) + changed = true + } + + var mk uint16 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int64) + v, changed := fastpathTV.DecMapUint16Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int64) + fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, 
checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int64, xlen) + changed = true + } + + var mk uint16 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]float32) + v, changed := fastpathTV.DecMapUint16Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]float32) + fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]float32, xlen) + changed = true + } + + var mk uint16 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]float64) + v, changed := fastpathTV.DecMapUint16Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]float64) + fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d *Decoder) { + v, changed := 
f.DecMapUint16Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]float64, xlen) + changed = true + } + + var mk uint16 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]bool) + v, changed := fastpathTV.DecMapUint16BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]bool) + fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]bool, xlen) + changed = true + } + + var mk uint16 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]interface{}) + v, changed := fastpathTV.DecMapUint32IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]interface{}) + fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) 
DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint32 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]string) + v, changed := fastpathTV.DecMapUint32StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]string) + fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]string, xlen) + changed = true + } + + var mk uint32 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint) + v, changed := fastpathTV.DecMapUint32UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint) + fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32UintV(*vp, checkNil, true, d) + if changed 
{ + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint, xlen) + changed = true + } + + var mk uint32 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint8) + v, changed := fastpathTV.DecMapUint32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint8) + fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]uint8, xlen) + changed = true + } + + var mk uint32 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint16) + v, changed := fastpathTV.DecMapUint32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint16) + fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, 
checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]uint16, xlen) + changed = true + } + + var mk uint32 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint32) + v, changed := fastpathTV.DecMapUint32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint32) + fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]uint32, xlen) + changed = true + } + + var mk uint32 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint64) + v, changed := fastpathTV.DecMapUint32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint64) + fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint64, 
changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint64, xlen) + changed = true + } + + var mk uint32 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uintptr) + v, changed := fastpathTV.DecMapUint32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uintptr) + fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uintptr, xlen) + changed = true + } + + var mk uint32 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int) + v, changed := fastpathTV.DecMapUint32IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int) + fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if 
v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int, xlen) + changed = true + } + + var mk uint32 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int8) + v, changed := fastpathTV.DecMapUint32Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int8) + fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]int8, xlen) + changed = true + } + + var mk uint32 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int16) + v, changed := fastpathTV.DecMapUint32Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int16) + fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && 
v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]int16, xlen) + changed = true + } + + var mk uint32 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int32) + v, changed := fastpathTV.DecMapUint32Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int32) + fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]int32, xlen) + changed = true + } + + var mk uint32 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int64) + v, changed := fastpathTV.DecMapUint32Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int64) + fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int64, xlen) + 
changed = true + } + + var mk uint32 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]float32) + v, changed := fastpathTV.DecMapUint32Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]float32) + fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]float32, xlen) + changed = true + } + + var mk uint32 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]float64) + v, changed := fastpathTV.DecMapUint32Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]float64) + fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]float64, xlen) + changed = true + } + + var mk uint32 + var mv float64 + 
if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]bool) + v, changed := fastpathTV.DecMapUint32BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]bool) + fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]bool, xlen) + changed = true + } + + var mk uint32 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]interface{}) + v, changed := fastpathTV.DecMapUint64IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]interface{}) + fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint64 + var mv interface{} + if containerLen > 0 { + for j := 0; j < 
containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]string) + v, changed := fastpathTV.DecMapUint64StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]string) + fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]string, xlen) + changed = true + } + + var mk uint64 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint) + v, changed := fastpathTV.DecMapUint64UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint) + fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint, xlen) + changed = true + } + + var mk uint64 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint8) + v, changed := fastpathTV.DecMapUint64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint8) + fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]uint8, xlen) + changed = true + } + + var mk uint64 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint16) + v, changed := fastpathTV.DecMapUint64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint16) + fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]uint16, xlen) + changed = true + } + + var mk uint64 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint32) + v, changed := fastpathTV.DecMapUint64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint32) + fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]uint32, xlen) + changed = true + } + + var mk uint64 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint64) + v, changed := fastpathTV.DecMapUint64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint64) + fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint64, xlen) + changed = true + } + + var mk uint64 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } 
+ } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uintptr) + v, changed := fastpathTV.DecMapUint64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uintptr) + fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uintptr, xlen) + changed = true + } + + var mk uint64 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int) + v, changed := fastpathTV.DecMapUint64IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int) + fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int, xlen) + changed = true + } + + var mk uint64 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int8) + v, changed := fastpathTV.DecMapUint64Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int8) + fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]int8, xlen) + changed = true + } + + var mk uint64 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int16) + v, changed := fastpathTV.DecMapUint64Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int16) + fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]int16, xlen) + changed = true + } + + var mk uint64 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = 
int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int32) + v, changed := fastpathTV.DecMapUint64Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int32) + fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]int32, xlen) + changed = true + } + + var mk uint64 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int64) + v, changed := fastpathTV.DecMapUint64Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int64) + fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int64, xlen) + changed = true + } + + var mk uint64 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} 
+ +func (f *decFnInfo) fastpathDecMapUint64Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]float32) + v, changed := fastpathTV.DecMapUint64Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]float32) + fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]float32, xlen) + changed = true + } + + var mk uint64 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]float64) + v, changed := fastpathTV.DecMapUint64Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]float64) + fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]float64, xlen) + changed = true + } + + var mk uint64 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64BoolR(rv reflect.Value) { + if 
rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]bool) + v, changed := fastpathTV.DecMapUint64BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]bool) + fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]bool, xlen) + changed = true + } + + var mk uint64 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]interface{}) + v, changed := fastpathTV.DecMapUintptrIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]interface{}) + fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uintptr + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) 
fastpathDecMapUintptrStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]string) + v, changed := fastpathTV.DecMapUintptrStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]string) + fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]string, xlen) + changed = true + } + + var mk uintptr + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint) + v, changed := fastpathTV.DecMapUintptrUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint) + fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint, xlen) + changed = true + } + + var mk uintptr + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint8R(rv 
reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint8) + v, changed := fastpathTV.DecMapUintptrUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint8) + fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]uint8, xlen) + changed = true + } + + var mk uintptr + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint16) + v, changed := fastpathTV.DecMapUintptrUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint16) + fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]uint16, xlen) + changed = true + } + + var mk uintptr + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint32R(rv reflect.Value) { + if 
rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint32) + v, changed := fastpathTV.DecMapUintptrUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint32) + fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]uint32, xlen) + changed = true + } + + var mk uintptr + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint64) + v, changed := fastpathTV.DecMapUintptrUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint64) + fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint64, xlen) + changed = true + } + + var mk uintptr + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp 
:= rv.Addr().Interface().(*map[uintptr]uintptr) + v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uintptr) + fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uintptr, xlen) + changed = true + } + + var mk uintptr + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int) + v, changed := fastpathTV.DecMapUintptrIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int) + fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int, xlen) + changed = true + } + + var mk uintptr + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := 
rv.Addr().Interface().(*map[uintptr]int8) + v, changed := fastpathTV.DecMapUintptrInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int8) + fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]int8, xlen) + changed = true + } + + var mk uintptr + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int16) + v, changed := fastpathTV.DecMapUintptrInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int16) + fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]int16, xlen) + changed = true + } + + var mk uintptr + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int32) + v, changed 
:= fastpathTV.DecMapUintptrInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int32) + fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]int32, xlen) + changed = true + } + + var mk uintptr + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int64) + v, changed := fastpathTV.DecMapUintptrInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int64) + fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int64, xlen) + changed = true + } + + var mk uintptr + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]float32) + v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, 
fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]float32) + fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]float32, xlen) + changed = true + } + + var mk uintptr + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]float64) + v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]float64) + fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]float64, xlen) + changed = true + } + + var mk uintptr + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]bool) + v, changed := 
fastpathTV.DecMapUintptrBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]bool) + fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]bool, xlen) + changed = true + } + + var mk uintptr + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]interface{}) + v, changed := fastpathTV.DecMapIntIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]interface{}) + fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := 
rv.Addr().Interface().(*map[int]string) + v, changed := fastpathTV.DecMapIntStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]string) + fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]string, xlen) + changed = true + } + + var mk int + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint) + v, changed := fastpathTV.DecMapIntUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint) + fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint, xlen) + changed = true + } + + var mk int + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint8) + v, changed := fastpathTV.DecMapIntUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { 
+ v := rv.Interface().(map[int]uint8) + fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]uint8, xlen) + changed = true + } + + var mk int + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint16) + v, changed := fastpathTV.DecMapIntUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint16) + fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]uint16, xlen) + changed = true + } + + var mk int + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint32) + v, changed := fastpathTV.DecMapIntUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint32) + fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) 
DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]uint32, xlen) + changed = true + } + + var mk int + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint64) + v, changed := fastpathTV.DecMapIntUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint64) + fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint64, xlen) + changed = true + } + + var mk int + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uintptr) + v, changed := fastpathTV.DecMapIntUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uintptr) + fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUintptrV(*vp, checkNil, true, d) + if changed { 
+ *vp = v + } +} +func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uintptr, xlen) + changed = true + } + + var mk int + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int) + v, changed := fastpathTV.DecMapIntIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int) + fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int, xlen) + changed = true + } + + var mk int + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int8) + v, changed := fastpathTV.DecMapIntInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int8) + fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int8, changed bool) { + dd := 
d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]int8, xlen) + changed = true + } + + var mk int + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int16) + v, changed := fastpathTV.DecMapIntInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int16) + fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]int16, xlen) + changed = true + } + + var mk int + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int32) + v, changed := fastpathTV.DecMapIntInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int32) + fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange 
&& v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]int32, xlen) + changed = true + } + + var mk int + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int64) + v, changed := fastpathTV.DecMapIntInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int64) + fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int64, xlen) + changed = true + } + + var mk int + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]float32) + v, changed := fastpathTV.DecMapIntFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]float32) + fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]float32, xlen) + changed = true + } + + var mk int + var mv 
float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]float64) + v, changed := fastpathTV.DecMapIntFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]float64) + fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]float64, xlen) + changed = true + } + + var mk int + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]bool) + v, changed := fastpathTV.DecMapIntBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]bool) + fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]bool, xlen) + changed = true + } + + var mk int + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]interface{}) + v, changed := fastpathTV.DecMapInt8IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]interface{}) + fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int8 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]string) + v, changed := fastpathTV.DecMapInt8StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]string) + fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]string, xlen) + changed = true + } + + var mk int8 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint) + v, changed := fastpathTV.DecMapInt8UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint) + fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint, xlen) + changed = true + } + + var mk int8 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint8) + v, changed := fastpathTV.DecMapInt8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint8) + fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]uint8, xlen) + changed = true + } + + var mk int8 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if 
containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint16) + v, changed := fastpathTV.DecMapInt8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint16) + fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]uint16, xlen) + changed = true + } + + var mk int8 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint32) + v, changed := fastpathTV.DecMapInt8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint32) + fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]uint32, xlen) + changed = true + } + + var mk int8 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint64) + v, changed := fastpathTV.DecMapInt8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint64) + fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint64, xlen) + changed = true + } + + var mk int8 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uintptr) + v, changed := fastpathTV.DecMapInt8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uintptr) + fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uintptr, xlen) + changed = true + } + + var mk int8 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if 
v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int) + v, changed := fastpathTV.DecMapInt8IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int) + fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int, xlen) + changed = true + } + + var mk int8 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int8) + v, changed := fastpathTV.DecMapInt8Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int8) + fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]int8, xlen) + changed = true + } + + var mk int8 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int16R(rv reflect.Value) { + if 
rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int16) + v, changed := fastpathTV.DecMapInt8Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int16) + fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]int16, xlen) + changed = true + } + + var mk int8 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int32) + v, changed := fastpathTV.DecMapInt8Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int32) + fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]int32, xlen) + changed = true + } + + var mk int8 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int64) + v, changed := fastpathTV.DecMapInt8Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } 
+ } else { + v := rv.Interface().(map[int8]int64) + fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int64, xlen) + changed = true + } + + var mk int8 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]float32) + v, changed := fastpathTV.DecMapInt8Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]float32) + fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]float32, xlen) + changed = true + } + + var mk int8 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]float64) + v, changed := fastpathTV.DecMapInt8Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]float64) + fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f 
fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]float64, xlen) + changed = true + } + + var mk int8 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]bool) + v, changed := fastpathTV.DecMapInt8BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]bool) + fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]bool, xlen) + changed = true + } + + var mk int8 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]interface{}) + v, changed := fastpathTV.DecMapInt16IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]interface{}) + fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} 
+func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int16 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]string) + v, changed := fastpathTV.DecMapInt16StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]string) + fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]string, xlen) + changed = true + } + + var mk int16 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint) + v, changed := fastpathTV.DecMapInt16UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint) + fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16UintV(*vp, checkNil, true, d) + if changed { + *vp = v + 
} +} +func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint, xlen) + changed = true + } + + var mk int16 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint8) + v, changed := fastpathTV.DecMapInt16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint8) + fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]uint8, xlen) + changed = true + } + + var mk int16 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint16) + v, changed := fastpathTV.DecMapInt16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint16) + fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool, + d *Decoder) (_ 
map[int16]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]uint16, xlen) + changed = true + } + + var mk int16 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint32) + v, changed := fastpathTV.DecMapInt16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint32) + fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]uint32, xlen) + changed = true + } + + var mk int16 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint64) + v, changed := fastpathTV.DecMapInt16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint64) + fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v 
!= nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint64, xlen) + changed = true + } + + var mk int16 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uintptr) + v, changed := fastpathTV.DecMapInt16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uintptr) + fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uintptr, xlen) + changed = true + } + + var mk int16 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int) + v, changed := fastpathTV.DecMapInt16IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int) + fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil 
{ + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int, xlen) + changed = true + } + + var mk int16 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int8) + v, changed := fastpathTV.DecMapInt16Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int8) + fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]int8, xlen) + changed = true + } + + var mk int16 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int16) + v, changed := fastpathTV.DecMapInt16Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int16) + fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]int16, xlen) + changed = true + } + + var mk int16 + var mv int16 + 
if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int32) + v, changed := fastpathTV.DecMapInt16Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int32) + fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]int32, xlen) + changed = true + } + + var mk int16 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int64) + v, changed := fastpathTV.DecMapInt16Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int64) + fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int64, xlen) + changed = true + } + + var mk int16 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]float32) + v, changed := fastpathTV.DecMapInt16Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]float32) + fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]float32, xlen) + changed = true + } + + var mk int16 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]float64) + v, changed := fastpathTV.DecMapInt16Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]float64) + fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]float64, xlen) + changed = true + } + + var mk int16 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]bool) + v, changed := fastpathTV.DecMapInt16BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]bool) + fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]bool, xlen) + changed = true + } + + var mk int16 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]interface{}) + v, changed := fastpathTV.DecMapInt32IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]interface{}) + fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int32 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + 
v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]string) + v, changed := fastpathTV.DecMapInt32StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]string) + fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]string, xlen) + changed = true + } + + var mk int32 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint) + v, changed := fastpathTV.DecMapInt32UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint) + fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint, xlen) + changed = true + } + + var mk int32 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil 
{ + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint8) + v, changed := fastpathTV.DecMapInt32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint8) + fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]uint8, xlen) + changed = true + } + + var mk int32 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint16) + v, changed := fastpathTV.DecMapInt32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint16) + fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]uint16, xlen) + changed = true + } + + var mk int32 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint32) + v, changed := fastpathTV.DecMapInt32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint32) + fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]uint32, xlen) + changed = true + } + + var mk int32 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint64) + v, changed := fastpathTV.DecMapInt32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint64) + fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint64, xlen) + changed = true + } + + var mk int32 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + 
if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uintptr) + v, changed := fastpathTV.DecMapInt32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uintptr) + fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uintptr, xlen) + changed = true + } + + var mk int32 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int) + v, changed := fastpathTV.DecMapInt32IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int) + fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int, xlen) + changed = true + } + + var mk int32 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f 
*decFnInfo) fastpathDecMapInt32Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int8) + v, changed := fastpathTV.DecMapInt32Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int8) + fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]int8, xlen) + changed = true + } + + var mk int32 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int16) + v, changed := fastpathTV.DecMapInt32Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int16) + fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]int16, xlen) + changed = true + } + + var mk int32 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int32) + v, changed := 
fastpathTV.DecMapInt32Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int32) + fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]int32, xlen) + changed = true + } + + var mk int32 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int64) + v, changed := fastpathTV.DecMapInt32Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int64) + fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int64, xlen) + changed = true + } + + var mk int32 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]float32) + v, changed := fastpathTV.DecMapInt32Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := 
rv.Interface().(map[int32]float32) + fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]float32, xlen) + changed = true + } + + var mk int32 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]float64) + v, changed := fastpathTV.DecMapInt32Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]float64) + fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]float64, xlen) + changed = true + } + + var mk int32 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]bool) + v, changed := fastpathTV.DecMapInt32BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]bool) + fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, 
false, f.d) + } +} +func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]bool, xlen) + changed = true + } + + var mk int32 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]interface{}) + v, changed := fastpathTV.DecMapInt64IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]interface{}) + fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int64 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]string) + v, changed := fastpathTV.DecMapInt64StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]string) + fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, 
f.d) + } +} +func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]string, xlen) + changed = true + } + + var mk int64 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint) + v, changed := fastpathTV.DecMapInt64UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint) + fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint, xlen) + changed = true + } + + var mk int64 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint8) + v, changed := fastpathTV.DecMapInt64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint8) + fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint8V(*vp, checkNil, true, d) + if 
changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]uint8, xlen) + changed = true + } + + var mk int64 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint16) + v, changed := fastpathTV.DecMapInt64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint16) + fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]uint16, xlen) + changed = true + } + + var mk int64 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint32) + v, changed := fastpathTV.DecMapInt64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint32) + fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChange bool, + d *Decoder) (_ 
map[int64]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]uint32, xlen) + changed = true + } + + var mk int64 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint64) + v, changed := fastpathTV.DecMapInt64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint64) + fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint64, xlen) + changed = true + } + + var mk int64 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uintptr) + v, changed := fastpathTV.DecMapInt64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uintptr) + fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } 
+ return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uintptr, xlen) + changed = true + } + + var mk int64 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int) + v, changed := fastpathTV.DecMapInt64IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int) + fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int, xlen) + changed = true + } + + var mk int64 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int8) + v, changed := fastpathTV.DecMapInt64Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int8) + fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = 
make(map[int64]int8, xlen) + changed = true + } + + var mk int64 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int16) + v, changed := fastpathTV.DecMapInt64Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int16) + fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]int16, xlen) + changed = true + } + + var mk int64 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int32) + v, changed := fastpathTV.DecMapInt64Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int32) + fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]int32, xlen) + changed = true + } + + var mk int64 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int64) + v, changed := fastpathTV.DecMapInt64Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int64) + fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int64, xlen) + changed = true + } + + var mk int64 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]float32) + v, changed := fastpathTV.DecMapInt64Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]float32) + fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]float32, xlen) + changed = true + } + + var mk int64 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = 
float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]float64) + v, changed := fastpathTV.DecMapInt64Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]float64) + fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]float64, xlen) + changed = true + } + + var mk int64 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]bool) + v, changed := fastpathTV.DecMapInt64BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]bool) + fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]bool, xlen) + changed = true + } + + var mk int64 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]interface{}) + v, changed := fastpathTV.DecMapBoolIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]interface{}) + fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk bool + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]string) + v, changed := fastpathTV.DecMapBoolStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]string) + fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]string, xlen) + changed = true + } + + var mk bool + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint) + v, changed := fastpathTV.DecMapBoolUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint) + fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint, xlen) + changed = true + } + + var mk bool + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint8) + v, changed := fastpathTV.DecMapBoolUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint8) + fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]uint8, xlen) + changed = true + } + + var mk bool + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + 
} + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint16) + v, changed := fastpathTV.DecMapBoolUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint16) + fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]uint16, xlen) + changed = true + } + + var mk bool + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint32) + v, changed := fastpathTV.DecMapBoolUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint32) + fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]uint32, xlen) + changed = true + } + + var mk bool + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint64R(rv reflect.Value) { + if 
rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint64) + v, changed := fastpathTV.DecMapBoolUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint64) + fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint64, xlen) + changed = true + } + + var mk bool + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uintptr) + v, changed := fastpathTV.DecMapBoolUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uintptr) + fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uintptr, xlen) + changed = true + } + + var mk bool + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int) + v, changed := fastpathTV.DecMapBoolIntV(*vp, fastpathCheckNilFalse, true, f.d) + if 
changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int) + fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int, xlen) + changed = true + } + + var mk bool + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int8) + v, changed := fastpathTV.DecMapBoolInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int8) + fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]int8, xlen) + changed = true + } + + var mk bool + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int16) + v, changed := fastpathTV.DecMapBoolInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int16) + fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d 
*Decoder) { + v, changed := f.DecMapBoolInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]int16, xlen) + changed = true + } + + var mk bool + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int32) + v, changed := fastpathTV.DecMapBoolInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int32) + fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]int32, xlen) + changed = true + } + + var mk bool + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int64) + v, changed := fastpathTV.DecMapBoolInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int64) + fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool, + d 
*Decoder) (_ map[bool]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int64, xlen) + changed = true + } + + var mk bool + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]float32) + v, changed := fastpathTV.DecMapBoolFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]float32) + fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]float32, xlen) + changed = true + } + + var mk bool + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]float64) + v, changed := fastpathTV.DecMapBoolFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]float64) + fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + 
return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]float64, xlen) + changed = true + } + + var mk bool + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]bool) + v, changed := fastpathTV.DecMapBoolBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]bool) + fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]bool, xlen) + changed = true + } + + var mk bool + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go new file mode 100644 index 000000000..63e591145 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go @@ -0,0 +1,34 @@ +// +build notfastpath + +package codec + +import "reflect" + +const fastpathEnabled = false + +// The generated fast-path code is very large, and adds a few seconds to the build time. +// This causes test execution, execution of small tools which use codec, etc +// to take a long time. +// +// To mitigate, we now support the notfastpath tag. +// This tag disables fastpath during build, allowing for faster build, test execution, +// short-program runs, etc. 
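The comment block above (in fast-path.not.go) explains the notfastpath build tag. As a hedged illustration only, and not part of the vendored diff itself, the sketch below shows how a consumer of this package might opt out of the generated fast paths; it assumes the standard Go toolchain and that fast-path.generated.go carries the inverse build constraint (!notfastpath), which is not shown in this hunk.

// Illustrative sketch, written as Go comments to match the surrounding file.
// Assumptions: standard "go" tool; fast-path.generated.go is tagged !notfastpath.
//
//   go build -tags notfastpath ./...
//   go test  -tags notfastpath github.com/ugorji/go/codec
//
// With the tag set, this stub file is compiled in place of the large generated
// fast-path file, fastpathEnabled is false, and encoding/decoding of slices and
// maps falls back to the reflection-based code paths, trading runtime speed for
// a noticeably faster build and test cycle.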
+ +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } + +type fastpathT struct{} +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*encFnInfo, reflect.Value) + decfn func(*decFnInfo, reflect.Value) +} +type fastpathA [0]fastpathE + +func (x fastpathA) index(rtid uintptr) int { return -1 } + +var fastpathAV fastpathA +var fastpathTV fastpathT diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go new file mode 100644 index 000000000..eb0bdad35 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go @@ -0,0 +1,243 @@ +/* // +build ignore */ + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl +// ************************************************************ + +package codec + +import ( + "encoding" + "reflect" +) + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continuously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. + +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { + return genHelperEncoder{e: e}, e.e +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { + return genHelperDecoder{d: d}, d.d +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + f.e.encodeI(iv, false, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshal(bs, fnerr, false, c_UTF8) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshal(bs, fnerr, true, c_UTF8) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshal(bs, fnerr, false, c_RAW) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { + f.e.raw(iv) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) TimeRtidIfBinc() uintptr { + if _, ok := f.e.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v) + if rt.Kind() == reflect.Ptr { + rt = rt.Elem() + } + rtid := reflect.ValueOf(rt).Pointer() + if xfFn := f.e.h.getExt(rtid); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncSendContainerState(c containerState) { + if f.e.cr != nil { + f.e.cr.sendContainerState(c) + } +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { + f.d.swallow() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + f.d.decodeI(iv, chkPtr, false, false, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeBytes(f.d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { + return f.d.raw() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) TimeRtidIfBinc() uintptr { + if _, ok := f.d.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v).Elem() + rtid := reflect.ValueOf(rt).Pointer() + if xfFn := f.d.h.getExt(rtid); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { + return decInferLen(clen, maxlen, unit) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSendContainerState(c containerState) { + if f.d.cr != nil { + f.d.cr.sendContainerState(c) + } +} diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go new file mode 100644 index 000000000..2ace97b78 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen.generated.go @@ -0,0 +1,175 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl + +const genDecMapTmpl = ` +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} > 0 { +for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { + z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} + if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} else if {{var "l"}} < 0 { +for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { + z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true {{ end }} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} + if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) +` + +const genDecListTmpl = ` +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{end}} {{if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else if {{var "l"}} > 0 { + {{if isChan }}if {{var "v"}} == nil { + {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) + {{var "c"}} = true + } + for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { + {{var "h"}}.ElemContainerState({{var "r"}}) + var {{var "t"}} {{ .Typ }} + {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} + {{var "v"}} <- {{var "t"}} + } + {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} + var {{var "rt"}} bool {{/* truncated */}} + _, _ = {{var "rl"}}, {{var "rt"}} + {{var "rr"}} = {{var "l"}} // len({{var "v"}}) + if {{var "l"}} > cap({{var "v"}}) { + {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) + {{ else }}{{if not .Immutable }} + {{var "rg"}} := len({{var "v"}}) > 0 + {{var "v2"}} := {{var "v"}} {{end}} + {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rt"}} { + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } 
else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} + if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} + } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } {{end}} {{/* end isSlice:47 */}} + {{var "j"}} := 0 + for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { + {{var "h"}}.ElemContainerState({{var "j"}}) + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + {{var "h"}}.ElemContainerState({{var "j"}}) + z.DecSwallow() + } + {{ else }}if {{var "rt"}} { + for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + {{var "v"}} = append({{var "v"}}, {{ zero}}) + {{var "h"}}.ElemContainerState({{var "j"}}) + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + } {{end}} {{/* end isArray:56 */}} + {{end}} {{/* end isChan:16 */}} +} else { {{/* len < 0 */}} + {{var "j"}} := 0 + for ; !r.CheckBreak(); {{var "j"}}++ { + {{if isChan }} + {{var "h"}}.ElemContainerState({{var "j"}}) + var {{var "t"}} {{ .Typ }} + {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} + {{var "v"}} <- {{var "t"}} + {{ else }} + if {{var "j"}} >= len({{var "v"}}) { + {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) + {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} + {{var "c"}} = true {{end}} + } + {{var "h"}}.ElemContainerState({{var "j"}}) + if {{var "j"}} < len({{var "v"}}) { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } else { + z.DecSwallow() + } + {{end}} + } + {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + }{{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} +` + diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go new file mode 100644 index 000000000..c4944dbff --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen.go @@ -0,0 +1,2014 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "go/format" + "io" + "io/ioutil" + "math/rand" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "text/template" + "time" + "unicode" + "unicode/utf8" +) + +// --------------------------------------------------- +// codecgen supports the full cycle of reflection-based codec: +// - RawExt +// - Raw +// - Builtins +// - Extensions +// - (Binary|Text|JSON)(Unm|M)arshal +// - generic by-kind +// +// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. +// In those areas, we try to only do reflection or interface-conversion when NECESSARY: +// - Extensions, only if Extensions are configured. +// +// However, codecgen doesn't support the following: +// - Canonical option. (codecgen IGNORES it currently) +// This is just because it has not been implemented. +// +// During encode/decode, Selfer takes precedence. 
+// A type implementing Selfer will know how to encode/decode itself statically. +// +// The following field types are supported: +// array: [n]T +// slice: []T +// map: map[K]V +// primitive: [u]int[n], float(32|64), bool, string +// struct +// +// --------------------------------------------------- +// Note that a Selfer cannot call (e|d).(En|De)code on itself, +// as this will cause a circular reference, as (En|De)code will call Selfer methods. +// Any type that implements Selfer must implement completely and not fallback to (En|De)code. +// +// In addition, code in this file manages the generation of fast-path implementations of +// encode/decode of slices/maps of primitive keys/values. +// +// Users MUST re-generate their implementations whenever the code shape changes. +// The generated code will panic if it was generated with a version older than the supporting library. +// --------------------------------------------------- +// +// codec framework is very feature rich. +// When encoding or decoding into an interface, it depends on the runtime type of the interface. +// The type of the interface may be a named type, an extension, etc. +// Consequently, we fallback to runtime codec for encoding/decoding interfaces. +// In addition, we fallback for any value which cannot be guaranteed at runtime. +// This allows us support ANY value, including any named types, specifically those which +// do not implement our interfaces (e.g. Selfer). +// +// This explains some slowness compared to other code generation codecs (e.g. msgp). +// This reduction in speed is only seen when your refers to interfaces, +// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } +// +// codecgen will panic if the file was generated with an old version of the library in use. +// +// Note: +// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. +// This way, there isn't a function call overhead just to see that we should not enter a block of code. + +// GenVersion is the current version of codecgen. +// +// NOTE: Increment this value each time codecgen changes fundamentally. +// Fundamental changes are: +// - helper methods change (signature change, new ones added, some removed, etc) +// - codecgen command line changes +// +// v1: Initial Version +// v2: +// v3: Changes for Kubernetes: +// changes in signature of some unpublished helper methods and codecgen cmdline arguments. +// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) +// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. +const GenVersion = 5 + +const ( + genCodecPkg = "codec1978" + genTempVarPfx = "yy" + genTopLevelVarName = "x" + + // ignore canBeNil parameter, and always set to true. + // This is because nil can appear anywhere, so we should always check. + genAnythingCanBeNil = true + + // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; + // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals + // are not executed a lot. 
+ // + // From testing, it didn't make much difference in runtime, so keep as true (one function only) + genUseOneFunctionForDecStructMap = true +) + +type genStructMapStyle uint8 + +const ( + genStructMapStyleConsolidated genStructMapStyle = iota + genStructMapStyleLenPrefix + genStructMapStyleCheckBreak +) + +var ( + genAllTypesSamePkgErr = errors.New("All types must be in the same package") + genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice") + genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") + genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) + genCheckVendor bool +) + +// genRunner holds some state used during a Gen run. +type genRunner struct { + w io.Writer // output + c uint64 // counter used for generating varsfx + t []reflect.Type // list of types to run selfer on + + tc reflect.Type // currently running selfer on this type + te map[uintptr]bool // types for which the encoder has been created + td map[uintptr]bool // types for which the decoder has been created + cp string // codec import path + + im map[string]reflect.Type // imports to add + imn map[string]string // package names of imports to add + imc uint64 // counter for import numbers + + is map[reflect.Type]struct{} // types seen during import search + bp string // base PkgPath, for which we are generating for + + cpfx string // codec package prefix + unsafe bool // is unsafe to be used in generated code? + + tm map[reflect.Type]struct{} // types for which enc/dec must be generated + ts []reflect.Type // types for which enc/dec must be generated + + xs string // top level variable/constant suffix + hn string // fn helper type name + + ti *TypeInfos + // rr *rand.Rand // random generator for file-specific types +} + +// Gen will write a complete go file containing Selfer implementations for each +// type passed. All the types must be in the same package. +// +// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.* +func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) { + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. + // Consequently, there's no need to check and trim them if they implement codec.Selfer + + if len(typ) == 0 { + return + } + x := genRunner{ + unsafe: useUnsafe, + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + } + if x.ti == nil { + x.ti = defTypeInfos + } + if x.xs == "" { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + x.xs = strconv.FormatInt(rr.Int63n(9999), 10) + } + + // gather imports first: + x.cp = genImportPath(reflect.TypeOf(x)) + x.imn[x.cp] = genCodecPkg + for _, t := range typ { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { + panic(genAllTypesSamePkgErr) + } + x.genRefPkgs(t) + } + if buildTags != "" { + x.line("// +build " + buildTags) + x.line("") + } + x.line(` + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +`) + x.line("package " + pkgName) + x.line("") + x.line("import (") + if x.cp != x.bp { + x.cpfx = genCodecPkg + "." + x.linef("%s \"%s\"", genCodecPkg, x.cp) + } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) + for k, _ := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { + x.linef("%s \"%s\"", x.imn[k], k) + } + // add required packages + for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} { + if _, ok := x.im[k]; !ok { + if k == "unsafe" && !x.unsafe { + continue + } + x.line("\"" + k + "\"") + } + } + x.line(")") + x.line("") + + x.line("const (") + x.linef("// ----- content types ----") + x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8)) + x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW)) + x.linef("// ----- value types used ----") + x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) + x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) + x.linef("// ----- containerStateValues ----") + x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey)) + x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue)) + x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd)) + x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem)) + x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd)) + x.line(")") + x.line("var (") + x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())") + x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") + x.line(")") + x.line("") + + if x.unsafe { + x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}") + x.line("") + } + x.hn = "codecSelfer" + x.xs + x.line("type " + x.hn + " struct{}") + x.line("") + + x.varsfxreset() + x.line("func init() {") + x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion) + x.line("_, file, _, _ := runtime.Caller(0)") + x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) + x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx) + x.line("panic(err)") + x.linef("}") + x.line("if false { // reference the types, but skip this branch at build/run time") + var n int + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] + x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) + n++ + } + if x.unsafe { + x.linef("var v%v unsafe.Pointer", n) + n++ + } + if n > 0 { + x.out("_") + for i := 1; i < n; i++ { + x.out(", _") + } + x.out(" = v0") + for i := 1; i < n; i++ { + x.outf(", v%v", i) + } + } + x.line("} ") // close if false + x.line("}") // close init + x.line("") + + // generate rest of type info + for _, t := range typ { + x.tc = t + x.selfer(true) + x.selfer(false) + } + + for _, t := range x.ts { + rtid := reflect.ValueOf(t).Pointer() + // generate enc functions for all these slice/map types. 
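+ // (x.ts holds the slice/map/array/chan helper types registered via registerXtraT while the selfers above were generated.)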
+ x.varsfxreset() + x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(true) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.encListFallback("v", t) + case reflect.Map: + x.encMapFallback("v", t) + default: + panic(genExpectArrayOrMapErr) + } + x.line("}") + x.line("") + + // generate dec functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(false) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.decListFallback("v", rtid, t) + case reflect.Map: + x.decMapFallback("v", rtid, t) + default: + panic(genExpectArrayOrMapErr) + } + x.line("}") + x.line("") + } + + x.line("") +} + +func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { + // return varname != genTopLevelVarName && t != x.tc + // the only time we checkForSelfer is if we are not at the TOP of the generated code. + return varname != genTopLevelVarName +} + +func (x *genRunner) arr2str(t reflect.Type, s string) string { + if t.Kind() == reflect.Array { + return s + } + return "" +} + +func (x *genRunner) genRequiredMethodVars(encode bool) { + x.line("var h " + x.hn) + if encode { + x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") + } else { + x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") + } + x.line("_, _, _ = h, z, r") +} + +func (x *genRunner) genRefPkgs(t reflect.Type) { + if _, ok := x.is[t]; ok { + return + } + // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + x.is[t] = struct{}{} + tpkg, tname := genImportPath(t), t.Name() + if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { + if _, ok := x.im[tpkg]; !ok { + x.im[tpkg] = t + if idx := strings.LastIndex(tpkg, "/"); idx < 0 { + x.imn[tpkg] = tpkg + } else { + x.imc++ + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) + } + } + } + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: + x.genRefPkgs(t.Elem()) + case reflect.Map: + x.genRefPkgs(t.Elem()) + x.genRefPkgs(t.Key()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { + x.genRefPkgs(t.Field(i).Type) + } + } + } +} + +func (x *genRunner) line(s string) { + x.out(s) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) varsfx() string { + x.c++ + return strconv.FormatUint(x.c, 10) +} + +func (x *genRunner) varsfxreset() { + x.c = 0 +} + +func (x *genRunner) out(s string) { + if _, err := io.WriteString(x.w, s); err != nil { + panic(err) + } +} + +func (x *genRunner) linef(s string, params ...interface{}) { + x.line(fmt.Sprintf(s, params...)) +} + +func (x *genRunner) outf(s string, params ...interface{}) { + x.out(fmt.Sprintf(s, params...)) +} + +func (x *genRunner) genTypeName(t reflect.Type) (n string) { + // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() + + // if the type has a PkgPath, which doesn't match the current package, + // then include it. 
+ // We cannot depend on t.String() because it includes current package, + // or t.PkgPath because it includes full import path, + // + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "*" + t = t.Elem() + } + if tn := t.Name(); tn != "" { + return ptrPfx + x.genTypeNamePrim(t) + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) + case reflect.Slice: + return ptrPfx + "[]" + x.genTypeName(t.Elem()) + case reflect.Array: + return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) + case reflect.Chan: + return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) + default: + if t == intfTyp { + return ptrPfx + "interface{}" + } else { + return ptrPfx + x.genTypeNamePrim(t) + } + } +} + +func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { + if t.Name() == "" { + return t.String() + } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { + return t.Name() + } else { + return x.imn[genImportPath(t)] + "." + t.Name() + // return t.String() // best way to get the package name inclusive + } +} + +func (x *genRunner) genZeroValueR(t reflect.Type) string { + // if t is a named type, w + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, + reflect.Slice, reflect.Map, reflect.Invalid: + return "nil" + case reflect.Bool: + return "false" + case reflect.String: + return `""` + case reflect.Struct, reflect.Array: + return x.genTypeName(t) + "{}" + default: // all numbers + return "0" + } +} + +func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { + return genMethodNameT(t, x.tc) +} + +func (x *genRunner) selfer(encode bool) { + t := x.tc + t0 := t + // always make decode use a pointer receiver, + // and structs always use a ptr receiver (encode|decode) + isptr := !encode || t.Kind() == reflect.Struct + x.varsfxreset() + fnSigPfx := "func (x " + if isptr { + fnSigPfx += "*" + } + fnSigPfx += x.genTypeName(t) + + x.out(fnSigPfx) + if isptr { + t = reflect.PtrTo(t) + } + if encode { + x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") + x.genRequiredMethodVars(true) + // x.enc(genTopLevelVarName, t) + x.encVar(genTopLevelVarName, t) + } else { + x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + // do not use decVar, as there is no need to check TryDecodeAsNil + // or way to elegantly handle that, and also setting it to a + // non-nil value doesn't affect the pointer passed. 
+ // x.decVar(genTopLevelVarName, t, false) + x.dec(genTopLevelVarName, t0) + } + x.line("}") + x.line("") + + if encode || t0.Kind() != reflect.Struct { + return + } + + // write is containerMap + if genUseOneFunctionForDecStructMap { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated) + x.line("}") + x.line("") + } else { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix) + x.line("}") + x.line("") + + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak) + x.line("}") + x.line("") + } + + // write containerArray + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0) + x.line("}") + x.line("") + +} + +// used for chan, array, slice, map +func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) { + if encode { + x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname) + } else { + x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname) + } + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. a slice of array: add the array + x.registerXtraT(t.Elem()) + if tkey != nil { + x.registerXtraT(tkey) + } +} + +// encVar will encode a variable. 
+// The parameter, t, is the reflect.Type of the variable itself +func (x *genRunner) encVar(varname string, t reflect.Type) { + // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) + var checkNil bool + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: + checkNil = true + } + if checkNil { + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + } + switch t.Kind() { + case reflect.Ptr: + switch t.Elem().Kind() { + case reflect.Struct, reflect.Array: + x.enc(varname, genNonPtr(t)) + default: + i := x.varsfx() + x.line(genTempVarPfx + i + " := *" + varname) + x.enc(genTempVarPfx+i, genNonPtr(t)) + } + case reflect.Struct, reflect.Array: + i := x.varsfx() + x.line(genTempVarPfx + i + " := &" + varname) + x.enc(genTempVarPfx+i, t) + default: + x.enc(varname, t) + } + + if checkNil { + x.line("}") + } + +} + +// enc will encode a variable (varname) of type t, +// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying) +func (x *genRunner) enc(varname string, t reflect.Type) { + rtid := reflect.ValueOf(t).Pointer() + // We call CodecEncodeSelf if one of the following are honored: + // - the type already implements Selfer, call that + // - the type has a Selfer implementation just created, use that + // - the type is in the list of the ones we will generate for, but it is not currently being generated + + mi := x.varsfx() + tptr := reflect.PtrTo(t) + tk := t.Kind() + if x.checkForSelfer(t, varname) { + if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T + if tptr.Implements(selferTyp) || t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } else { // varname is of type T + if t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } else if tptr.Implements(selferTyp) { + x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) + x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) + return + } + } + + if _, ok := x.te[rtid]; ok { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.te[rtid] = true + rtidAdded = true + } + + // check if + // - type is RawExt, Raw + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi) + x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == rawTyp { + x.linef("} else { z.EncRaw(%v)", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.EncodeRawExt(%v, e)", varname) + return + } + // HACK: Support for Builtins. + // Currently, only Binc supports builtins, and the only builtin type is time.Time. + // Have a method that returns the rtid for time.Time if Handle is Binc. + if t == timeTyp { + vrtid := genTempVarPfx + "m" + x.varsfx() + x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) + x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname) + } + // only check for extensions if the type is named, and has a packagePath. 
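+ // (unnamed or predeclared types fall through to the marshaler/kind-based handling below.)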
+ if genImportPath(t) != "" && t.Name() != "" { + // first check if extensions are configued, before doing the interface conversion + x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) + } + if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T + if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) + } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + } + } else { // varname is of type T + if t.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname) + } else if t.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname) + } + } + x.line("} else {") + + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(int64(" + varname + "))") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(uint64(" + varname + "))") + case reflect.Float32: + x.line("r.EncodeFloat32(float32(" + varname + "))") + case reflect.Float64: + x.line("r.EncodeFloat64(float64(" + varname + "))") + case reflect.Bool: + x.line("r.EncodeBool(bool(" + varname + "))") + case reflect.String: + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))") + case reflect.Chan: + x.xtraSM(varname, true, t) + // x.encListFallback(varname, rtid, t) + case reflect.Array: + x.xtraSM(varname, true, t) + case reflect.Slice: + // if nil, call dedicated function + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if rtid == uint8SliceTypId { + x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))") + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") + } else { + x.xtraSM(varname, true, t) + // x.encListFallback(varname, rtid, t) + } + case reflect.Map: + // if nil, call dedicated function + // if a known fastpath map, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." 
+ g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") + } else { + x.xtraSM(varname, true, t) + // x.encMapFallback(varname, rtid, t) + } + case reflect.Struct: + if !inlist { + delete(x.te, rtid) + x.line("z.EncFallback(" + varname + ")") + break + } + x.encStruct(varname, rtid, t) + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.EncFallback(" + varname + ")") + } +} + +func (x *genRunner) encZero(t reflect.Type) { + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(0)") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(0)") + case reflect.Float32: + x.line("r.EncodeFloat32(0)") + case reflect.Float64: + x.line("r.EncodeFloat64(0)") + case reflect.Bool: + x.line("r.EncodeBool(false)") + case reflect.String: + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`) + default: + x.line("r.EncodeNil()") + } +} + +func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { + // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) + // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it + + // if t === type currently running selfer on, do for all + ti := x.ti.get(rtid, t) + i := x.varsfx() + sepVarname := genTempVarPfx + "sep" + i + numfieldsvar := genTempVarPfx + "q" + i + ti2arrayvar := genTempVarPfx + "r" + i + struct2arrvar := genTempVarPfx + "2arr" + i + + x.line(sepVarname + " := !z.EncBinary()") + x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + // due to omitEmpty, we need to calculate the + // number of non-empty things we write out first. + // This is required as we need to pre-determine the size of the container, + // to support length-prefixing. + x.linef("var %s [%v]bool", numfieldsvar, len(tisfi)) + x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar) + x.linef("const %s bool = %v", ti2arrayvar, ti.toArray) + nn := 0 + for j, si := range tisfi { + if !si.omitEmpty { + nn++ + continue + } + var t2 reflect.StructField + var omitline string + if si.i != -1 { + t2 = t.Field(int(si.i)) + } else { + t2typ := t + varname3 := varname + for _, ix := range si.is { + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(ix) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + omitline += varname3 + " != nil && " + } + } + } + // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. + // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) + switch t2.Type.Kind() { + case reflect.Struct: + omitline += " true" + case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: + omitline += "len(" + varname + "." + t2.Name + ") != 0" + default: + omitline += varname + "." 
+ t2.Name + " != " + x.genZeroValueR(t2.Type) + } + x.linef("%s[%v] = %s", numfieldsvar, j, omitline) + } + x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") + x.linef("} else {") // if not ti.toArray + x.linef("%snn%s = %v", genTempVarPfx, i, nn) + x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) + x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i) + x.linef("%snn%s = %v", genTempVarPfx, i, 0) + // x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") + x.line("}") // close if not StructToArray + + for j, si := range tisfi { + i := x.varsfx() + isNilVarName := genTempVarPfx + "n" + i + var labelUsed bool + var t2 reflect.StructField + if si.i != -1 { + t2 = t.Field(int(si.i)) + } else { + t2typ := t + varname3 := varname + for _, ix := range si.is { + // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix) + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(ix) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + if !labelUsed { + x.line("var " + isNilVarName + " bool") + } + x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") + x.line("goto LABEL" + i) + x.line("}") + labelUsed = true + // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") + } + } + // t2 = t.FieldByIndex(si.is) + } + if labelUsed { + x.line("LABEL" + i + ":") + } + // if the type of the field is a Selfer, or one of the ones + + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + } + x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + if si.omitEmpty { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.encVar(varname+"."+t2.Name, t2.Type) + if si.omitEmpty { + x.linef("} else {") + x.encZero(t2.Type) + x.linef("}") + } + if labelUsed { + x.line("}") + } + + x.linef("} else {") // if not ti.toArray + + if si.omitEmpty { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))") + x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + x.encVar(varname+"."+t2.Name, t2.Type) + x.line("}") + } else { + x.encVar(varname+"."+t2.Name, t2.Type) + } + if si.omitEmpty { + x.line("}") + } + x.linef("} ") // end if/else ti.toArray + } + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("} else {") + x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + x.line("}") + +} + +func (x *genRunner) encListFallback(varname string, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname) + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname) + return + } + i := x.varsfx() + g := genTempVarPfx + x.line("r.EncodeArrayStart(len(" + varname + "))") + if t.Kind() == reflect.Chan { + x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, 
g, i, g, i, g, i) + x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.linef("%sv%s := <-%s", g, i, varname) + } else { + // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) + x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + } + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) +} + +func (x *genRunner) encMapFallback(varname string, t reflect.Type) { + // TODO: expand this to handle canonical. + i := x.varsfx() + x.line("r.EncodeMapStart(len(" + varname + "))") + x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {") + x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.encVar(genTempVarPfx+"k"+i, t.Key()) + x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) +} + +func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) { + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + i := x.varsfx() + if !canBeNil { + canBeNil = genAnythingCanBeNil || !genIsImmutable(t) + } + if canBeNil { + x.line("if r.TryDecodeAsNil() {") + if t.Kind() == reflect.Ptr { + x.line("if " + varname + " != nil { ") + + // if varname is a field of a struct (has a dot in it), + // then just set it to nil + if strings.IndexByte(varname, '.') != -1 { + x.line(varname + " = nil") + } else { + x.line("*" + varname + " = " + x.genZeroValueR(t.Elem())) + } + x.line("}") + } else { + x.line(varname + " = " + x.genZeroValueR(t)) + } + x.line("} else {") + } else { + x.line("// cannot be nil") + } + if t.Kind() != reflect.Ptr { + if x.decTryAssignPrimitive(varname, t) { + x.line(genTempVarPfx + "v" + i + " := &" + varname) + x.dec(genTempVarPfx+"v"+i, t) + } + } else { + x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) + // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). + // There's a chance of a **T in here which is nil. + var ptrPfx string + for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { + ptrPfx += "*" + x.linef("if %s%s == nil { %s%s = new(%s)}", + ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) + } + // if varname has [ in it, then create temp variable for this ptr thingie + if strings.Index(varname, "[") >= 0 { + varname2 := genTempVarPfx + "w" + i + x.line(varname2 + " := " + varname) + varname = varname2 + } + + if ptrPfx == "" { + x.dec(varname, t) + } else { + x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname) + x.dec(genTempVarPfx+"z"+i, t) + } + + } + + if canBeNil { + x.line("} ") + } +} + +// dec will decode a variable (varname) of type ptrTo(t). +// t is always a basetype (i.e. not of kind reflect.Ptr). 
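+// The resolution order mirrors enc: Selfer, an already-generated selfer, Raw/RawExt, the Binc time builtin, extensions, Binary/JSON/Text unmarshalers, and finally kind-based decoding.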
+func (x *genRunner) dec(varname string, t reflect.Type) { + // assumptions: + // - the varname is to a pointer already. No need to take address of it + // - t is always a baseType T (not a *T, etc). + rtid := reflect.ValueOf(t).Pointer() + tptr := reflect.PtrTo(t) + if x.checkForSelfer(t, varname) { + if t.Implements(selferTyp) || tptr.Implements(selferTyp) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + if _, ok := x.td[rtid]; ok { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.td[rtid] = true + rtidAdded = true + } + + // check if + // - type is Raw, RawExt + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + mi := x.varsfx() + x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) + x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == rawTyp { + x.linef("} else { *%v = z.DecRaw()", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) + return + } + + // HACK: Support for Builtins. + // Currently, only Binc supports builtins, and the only builtin type is time.Time. + // Have a method that returns the rtid for time.Time if Handle is Binc. + if t == timeTyp { + vrtid := genTempVarPfx + "m" + x.varsfx() + x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) + x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) + } + // only check for extensions if the type is named, and has a packagePath. + if genImportPath(t) != "" && t.Name() != "" { + // first check if extensions are configued, before doing the interface conversion + x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) + } + + if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { + x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname) + } else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { + x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname) + } + + x.line("} else {") + + // Since these are pointers, we cannot share, and have to use them one by one + switch t.Kind() { + case reflect.Int: + x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))") + // x.line("z.DecInt((*int)(" + varname + "))") + case reflect.Int8: + x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))") + // x.line("z.DecInt8((*int8)(" + varname + "))") + case reflect.Int16: + x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))") + // x.line("z.DecInt16((*int16)(" + varname + "))") + case reflect.Int32: + x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))") + // x.line("z.DecInt32((*int32)(" + varname + "))") + case reflect.Int64: + x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))") + // x.line("z.DecInt64((*int64)(" + varname + "))") + + case reflect.Uint: + x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))") + // x.line("z.DecUint((*uint)(" + varname + "))") + case reflect.Uint8: + x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))") + // 
x.line("z.DecUint8((*uint8)(" + varname + "))") + case reflect.Uint16: + x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))") + //x.line("z.DecUint16((*uint16)(" + varname + "))") + case reflect.Uint32: + x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))") + //x.line("z.DecUint32((*uint32)(" + varname + "))") + case reflect.Uint64: + x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))") + //x.line("z.DecUint64((*uint64)(" + varname + "))") + case reflect.Uintptr: + x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))") + + case reflect.Float32: + x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))") + //x.line("z.DecFloat32((*float32)(" + varname + "))") + case reflect.Float64: + x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))") + // x.line("z.DecFloat64((*float64)(" + varname + "))") + + case reflect.Bool: + x.line("*((*bool)(" + varname + ")) = r.DecodeBool()") + // x.line("z.DecBool((*bool)(" + varname + "))") + case reflect.String: + x.line("*((*string)(" + varname + ")) = r.DecodeString()") + // x.line("z.DecString((*string)(" + varname + "))") + case reflect.Array, reflect.Chan: + x.xtraSM(varname, false, t) + // x.decListFallback(varname, rtid, true, t) + case reflect.Slice: + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if rtid == uint8SliceTypId { + x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)") + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") + } else { + x.xtraSM(varname, false, t) + // x.decListFallback(varname, rtid, false, t) + } + case reflect.Map: + // if a known fastpath map, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") + } else { + x.xtraSM(varname, false, t) + // x.decMapFallback(varname, rtid, t) + } + case reflect.Struct: + if inlist { + x.decStruct(varname, rtid, t) + } else { + // delete(x.td, rtid) + x.line("z.DecFallback(" + varname + ", false)") + } + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.DecFallback(" + varname + ", true)") + } +} + +func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) { + // This should only be used for exact primitives (ie un-named types). + // Named types may be implementations of Selfer, Unmarshaler, etc. + // They should be handled by dec(...) 
+ + if t.Name() != "" { + tryAsPtr = true + return + } + + switch t.Kind() { + case reflect.Int: + x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs) + case reflect.Int8: + x.linef("%s = r.DecodeInt(8)", varname) + case reflect.Int16: + x.linef("%s = r.DecodeInt(16)", varname) + case reflect.Int32: + x.linef("%s = r.DecodeInt(32)", varname) + case reflect.Int64: + x.linef("%s = r.DecodeInt(64)", varname) + + case reflect.Uint: + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) + case reflect.Uint8: + x.linef("%s = r.DecodeUint(8)", varname) + case reflect.Uint16: + x.linef("%s = r.DecodeUint(16)", varname) + case reflect.Uint32: + x.linef("%s = r.DecodeUint(32)", varname) + case reflect.Uint64: + x.linef("%s = r.DecodeUint(64)", varname) + case reflect.Uintptr: + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) + + case reflect.Float32: + x.linef("%s = r.DecodeFloat(true)", varname) + case reflect.Float64: + x.linef("%s = r.DecodeFloat(false)", varname) + + case reflect.Bool: + x.linef("%s = r.DecodeBool()", varname) + case reflect.String: + x.linef("%s = r.DecodeString()", varname) + default: + tryAsPtr = true + } + return +} + +func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname) + return + } + type tstruc struct { + TempVar string + Rand string + Varname string + CTyp string + Typ string + Immutable bool + Size int + } + telem := t.Elem() + ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} + + funcs := make(template.FuncMap) + + funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, telem, false) + return "" + } + funcs["decLine"] = func(pfx string) string { + x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + funcs["zero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["isArray"] = func() bool { + return t.Kind() == reflect.Array + } + funcs["isSlice"] = func() bool { + return t.Kind() == reflect.Slice + } + funcs["isChan"] = func() bool { + return t.Kind() == reflect.Chan + } + tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { + type tstruc struct { + TempVar string + Sfx string + Rand string + Varname string + KTyp string + Typ string + Size int + } + telem := t.Elem() + tkey := t.Key() + ts := tstruc{ + genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), + x.genTypeName(telem), int(telem.Size() + tkey.Size()), + } + + funcs := make(template.FuncMap) + funcs["decElemZero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["decElemKindImmutable"] = func() bool { + return genIsImmutable(telem) + } + funcs["decElemKindPtr"] = func() bool { + return telem.Kind() == reflect.Ptr + } + funcs["decElemKindIntf"] = func() bool { + return telem.Kind() == reflect.Interface + } + funcs["decLineVarK"] = func(varname string) string { + x.decVar(varname, tkey, false) + return "" + } + 
funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, telem, false) + return "" + } + funcs["decLineK"] = func(pfx string) string { + x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false) + return "" + } + funcs["decLine"] = func(pfx string) string { + x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + + tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { + ti := x.ti.get(rtid, t) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + x.line("switch (" + kName + ") {") + for _, si := range tisfi { + x.line("case \"" + si.encName + "\":") + var t2 reflect.StructField + if si.i != -1 { + t2 = t.Field(int(si.i)) + } else { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + for _, ix := range si.is { + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(ix) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + } + } + x.decVar(varname+"."+t2.Name, t2.Type, false) + } + x.line("default:") + // pass the slice here, so that the string will not escape, and maybe save allocation + x.line("z.DecStructFieldNotFound(-1, " + kName + ")") + x.line("} // end switch " + kName) +} + +func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { + tpfx := genTempVarPfx + i := x.varsfx() + kName := tpfx + "s" + i + + // We thought to use ReadStringAsBytes, as go compiler might optimize the copy out. + // However, using that was more expensive, as it seems that the switch expression + // is evaluated each time. + // + // We could depend on decodeString using a temporary/shared buffer internally. + // However, this model of creating a byte array, and using explicitly is faster, + // and allows optional use of unsafe []byte->string conversion without alloc. + + // Also, ensure that the slice array doesn't escape. + // That will help escape analysis prevent allocation when it gets better. + + // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into") + // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into") + // use the scratch buffer to avoid allocation (most field names are < 32). + + x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into") + + x.line("_ = " + kName + "Slc") + switch style { + case genStructMapStyleLenPrefix: + x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) + case genStructMapStyleCheckBreak: + x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) + default: // 0, otherwise. 
+ x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) + x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) + x.line("} else { if r.CheckBreak() { break }; }") + } + x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)") + // let string be scoped to this loop alone, so it doesn't escape. + if x.unsafe { + x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" + + kName + "Slc[0])), len(" + kName + "Slc)}") + x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))") + } else { + x.line(kName + " := string(" + kName + "Slc)") + } + x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.decStructMapSwitch(kName, varname, rtid, t) + + x.line("} // end for " + tpfx + "j" + i) + x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) +} + +func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { + tpfx := genTempVarPfx + i := x.varsfx() + ti := x.ti.get(rtid, t) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + x.linef("var %sj%s int", tpfx, i) + x.linef("var %sb%s bool", tpfx, i) // break + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + for _, si := range tisfi { + var t2 reflect.StructField + if si.i != -1 { + t2 = t.Field(int(si.i)) + } else { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + for _, ix := range si.is { + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(ix) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + } + } + + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", + tpfx, i, x.xs, breakString) + x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.decVar(varname+"."+t2.Name, t2.Type, true) + } + // read remaining values and throw away. 
+ x.line("for {") + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { break }", tpfx, i) + x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) + x.line("}") + x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) +} + +func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { + // if container is map + i := x.varsfx() + x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) + x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + if genUseOneFunctionForDecStructMap { + x.line("} else { ") + x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i) + } else { + x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") + x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") + x.line("} else {") + x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") + } + x.line("}") + + // else if container is array + x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("} else { ") + x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i) + x.line("}") + // else panic + x.line("} else { ") + x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")") + x.line("} ") +} + +// -------- + +type genV struct { + // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice + MapKey string + Elem string + Primitive string + Size int +} + +func (x *genRunner) newGenV(t reflect.Type) (v genV) { + switch t.Kind() { + case reflect.Slice, reflect.Array: + te := t.Elem() + v.Elem = x.genTypeName(te) + v.Size = int(te.Size()) + case reflect.Map: + te, tk := t.Elem(), t.Key() + v.Elem = x.genTypeName(te) + v.MapKey = x.genTypeName(tk) + v.Size = int(te.Size() + tk.Size()) + default: + panic("unexpected type for newGenV. Requires map or slice type") + } + return +} + +func (x *genV) MethodNamePfx(prefix string, prim bool) string { + var name []byte + if prefix != "" { + name = append(name, prefix...) + } + if prim { + name = append(name, genTitleCaseName(x.Primitive)...) + } else { + if x.MapKey == "" { + name = append(name, "Slice"...) + } else { + name = append(name, "Map"...) + name = append(name, genTitleCaseName(x.MapKey)...) + } + name = append(name, genTitleCaseName(x.Elem)...) + } + return string(name) + +} + +// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. +// +// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, +// where PkgPath returns the full path, including the vendoring pre-fix that should have been stripped. +// We strip it here. +func genImportPath(t reflect.Type) (s string) { + s = t.PkgPath() + if genCheckVendor { + // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. + // if s contains /vendor/ OR startsWith vendor/, then return everything after it. 
+ const vendorStart = "vendor/" + const vendorInline = "/vendor/" + if i := strings.LastIndex(s, vendorInline); i >= 0 { + s = s[i+len(vendorInline):] + } else if strings.HasPrefix(s, vendorStart) { + s = s[len(vendorStart):] + } + } + return +} + +// A go identifier is (letter|_)[letter|number|_]* +func genGoIdentifier(s string, checkFirstChar bool) string { + b := make([]byte, 0, len(s)) + t := make([]byte, 4) + var n int + for i, r := range s { + if checkFirstChar && i == 0 && !unicode.IsLetter(r) { + b = append(b, '_') + } + // r must be unicode_letter, unicode_digit or _ + if unicode.IsLetter(r) || unicode.IsDigit(r) { + n = utf8.EncodeRune(t, r) + b = append(b, t[:n]...) + } else { + b = append(b, '_') + } + } + return string(b) +} + +func genNonPtr(t reflect.Type) reflect.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func genTitleCaseName(s string) string { + switch s { + case "interface{}", "interface {}": + return "Intf" + default: + return strings.ToUpper(s[0:1]) + s[1:] + } +} + +func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "Ptrto" + t = t.Elem() + } + tstr := t.String() + if tn := t.Name(); tn != "" { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + return ptrPfx + tn + } else { + if genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) + case reflect.Slice: + return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) + case reflect.Array: + return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) + case reflect.Chan: + var cx string + switch t.ChanDir() { + case reflect.SendDir: + cx = "ChanSend" + case reflect.RecvDir: + cx = "ChanRecv" + default: + cx = "Chan" + } + return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) + default: + if t == intfTyp { + return ptrPfx + "Interface" + } else { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + if t.Name() != "" { + return ptrPfx + t.Name() + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } else { + // best way to get the package name inclusive + // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) + if t.Name() != "" && genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + } +} + +// genCustomNameForType base64encodes the t.String() value in such a way +// that it can be used within a function name. 
+func genCustomTypeName(tstr string) string { + len2 := genBase64enc.EncodedLen(len(tstr)) + bufx := make([]byte, len2) + genBase64enc.Encode(bufx, []byte(tstr)) + for i := len2 - 1; i >= 0; i-- { + if bufx[i] == '=' { + len2-- + } else { + break + } + } + return string(bufx[:len2]) +} + +func genIsImmutable(t reflect.Type) (v bool) { + return isImmutableKind(t.Kind()) +} + +type genInternal struct { + Values []genV + Unsafe bool +} + +func (x genInternal) FastpathLen() (l int) { + for _, v := range x.Values { + if v.Primitive == "" { + l++ + } + } + return +} + +func genInternalZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return "nil" + case "bool": + return "false" + case "string": + return `""` + default: + return "0" + } +} + +func genInternalEncCommandAsString(s string, vname string) string { + switch s { + case "uint", "uint8", "uint16", "uint32", "uint64": + return "ee.EncodeUint(uint64(" + vname + "))" + case "int", "int8", "int16", "int32", "int64": + return "ee.EncodeInt(int64(" + vname + "))" + case "string": + return "ee.EncodeString(c_UTF8, " + vname + ")" + case "float32": + return "ee.EncodeFloat32(" + vname + ")" + case "float64": + return "ee.EncodeFloat64(" + vname + ")" + case "bool": + return "ee.EncodeBool(" + vname + ")" + case "symbol": + return "ee.EncodeSymbol(" + vname + ")" + default: + return "e.encode(" + vname + ")" + } +} + +func genInternalDecCommandAsString(s string) string { + switch s { + case "uint": + return "uint(dd.DecodeUint(uintBitsize))" + case "uint8": + return "uint8(dd.DecodeUint(8))" + case "uint16": + return "uint16(dd.DecodeUint(16))" + case "uint32": + return "uint32(dd.DecodeUint(32))" + case "uint64": + return "dd.DecodeUint(64)" + case "uintptr": + return "uintptr(dd.DecodeUint(uintBitsize))" + case "int": + return "int(dd.DecodeInt(intBitsize))" + case "int8": + return "int8(dd.DecodeInt(8))" + case "int16": + return "int16(dd.DecodeInt(16))" + case "int32": + return "int32(dd.DecodeInt(32))" + case "int64": + return "dd.DecodeInt(64)" + + case "string": + return "dd.DecodeString()" + case "float32": + return "float32(dd.DecodeFloat(true))" + case "float64": + return "dd.DecodeFloat(false)" + case "bool": + return "dd.DecodeBool()" + default: + panic(errors.New("gen internal: unknown type for decode: " + s)) + } +} + +func genInternalSortType(s string, elem bool) string { + for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { + if strings.HasPrefix(s, v) { + if elem { + if v == "int" || v == "uint" || v == "float" { + return v + "64" + } else { + return v + } + } + return v + "Slice" + } + } + panic("sorttype: unexpected type: " + s) +} + +// var genInternalMu sync.Mutex +var genInternalV genInternal +var genInternalTmplFuncs template.FuncMap +var genInternalOnce sync.Once + +func genInternalInit() { + types := [...]string{ + "interface{}", + "string", + "float32", + "float64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "bool", + } + // keep as slice, so it is in specific iteration order. 
+ // Initial order was uint64, string, interface{}, int, int64 + mapvaltypes := [...]string{ + "interface{}", + "string", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "bool", + } + wordSizeBytes := int(intBitsize) / 8 + + mapvaltypes2 := map[string]int{ + "interface{}": 2 * wordSizeBytes, + "string": 2 * wordSizeBytes, + "uint": 1 * wordSizeBytes, + "uint8": 1, + "uint16": 2, + "uint32": 4, + "uint64": 8, + "uintptr": 1 * wordSizeBytes, + "int": 1 * wordSizeBytes, + "int8": 1, + "int16": 2, + "int32": 4, + "int64": 8, + "float32": 4, + "float64": 8, + "bool": 1, + } + var gt genInternal + + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function + for _, s := range types { + gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) + if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. + gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + } + if _, ok := mapvaltypes2[s]; !ok { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) + } + for _, ms := range mapvaltypes { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) + } + } + + funcs := make(template.FuncMap) + // funcs["haspfx"] = strings.HasPrefix + funcs["encmd"] = genInternalEncCommandAsString + funcs["decmd"] = genInternalDecCommandAsString + funcs["zerocmd"] = genInternalZeroValue + funcs["hasprefix"] = strings.HasPrefix + funcs["sorttype"] = genInternalSortType + + genInternalV = gt + genInternalTmplFuncs = funcs +} + +// genInternalGoFile is used to generate source files from templates. +// It is run by the program author alone. +// Unfortunately, it has to be exported so that it can be called from a command line tool. +// *** DO NOT USE *** +func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) { + genInternalOnce.Do(genInternalInit) + + gt := genInternalV + gt.Unsafe = !safe + + t := template.New("").Funcs(genInternalTmplFuncs) + + tmplstr, err := ioutil.ReadAll(r) + if err != nil { + return + } + + if t, err = t.Parse(string(tmplstr)); err != nil { + return + } + + var out bytes.Buffer + err = t.Execute(&out, gt) + if err != nil { + return + } + + bout, err := format.Source(out.Bytes()) + if err != nil { + w.Write(out.Bytes()) // write out if error, so we can still see. + // w.Write(bout) // write out if error, as much as possible, so we can still see. + return + } + w.Write(bout) + return +} diff --git a/vendor/github.com/ugorji/go/codec/gen_15.go b/vendor/github.com/ugorji/go/codec/gen_15.go new file mode 100644 index 000000000..ab76c3102 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_15.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5,!go1.6 + +package codec + +import "os" + +func init() { + genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" +} diff --git a/vendor/github.com/ugorji/go/codec/gen_16.go b/vendor/github.com/ugorji/go/codec/gen_16.go new file mode 100644 index 000000000..87c04e2e1 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_16.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// +build go1.6 + +package codec + +import "os" + +func init() { + genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" +} diff --git a/vendor/github.com/ugorji/go/codec/gen_17.go b/vendor/github.com/ugorji/go/codec/gen_17.go new file mode 100644 index 000000000..3881a43ce --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_17.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.7 + +package codec + +func init() { + genCheckVendor = true +} diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go new file mode 100644 index 000000000..8b94fc1e4 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper.go @@ -0,0 +1,1314 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// Contains code shared by both encode and decode. + +// Some shared ideas around encoding/decoding +// ------------------------------------------ +// +// If an interface{} is passed, we first do a type assertion to see if it is +// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. +// +// If we start with a reflect.Value, we are already in reflect.Value land and +// will try to grab the function for the underlying Type and directly call that function. +// This is more performant than calling reflect.Value.Interface(). +// +// This still helps us bypass many layers of reflection, and give best performance. +// +// Containers +// ------------ +// Containers in the stream are either associative arrays (key-value pairs) or +// regular arrays (indexed by incrementing integers). +// +// Some streams support indefinite-length containers, and use a breaking +// byte-sequence to denote that the container has come to an end. +// +// Some streams also are text-based, and use explicit separators to denote the +// end/beginning of different values. +// +// During encode, we use a high-level condition to determine how to iterate through +// the container. That decision is based on whether the container is text-based (with +// separators) or binary (without separators). If binary, we do not even call the +// encoding of separators. +// +// During decode, we use a different high-level condition to determine how to iterate +// through the containers. That decision is based on whether the stream contained +// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that +// it has to be binary, and we do not even try to read separators. +// +// Philosophy +// ------------ +// On decode, this codec will update containers appropriately: +// - If struct, update fields from stream into fields of struct. +// If field in stream not found in struct, handle appropriately (based on option). +// If a struct field has no corresponding value in the stream, leave it AS IS. +// If nil in stream, set value to nil/zero value. +// - If map, update map from stream. +// If the stream value is NIL, set the map to nil. +// - if slice, try to update up to length of array in stream. +// if container len is less than stream array length, +// and container cannot be expanded, handled (based on option). +// This means you can decode 4-element stream array into 1-element array. +// +// ------------------------------------ +// On encode, user can specify omitEmpty. 
This means that the value will be omitted +// if the zero value. The problem may occur during decode, where omitted values do not affect +// the value being decoded into. This means that if decoding into a struct with an +// int field with current value=5, and the field is omitted in the stream, then after +// decoding, the value will still be 5 (not 0). +// omitEmpty only works if you guarantee that you always decode into zero-values. +// +// ------------------------------------ +// We could have truncated a map to remove keys not available in the stream, +// or set values in the struct which are not in the stream to their zero values. +// We decided against it because there is no efficient way to do it. +// We may introduce it as an option later. +// However, that will require enabling it for both runtime and code generation modes. +// +// To support truncate, we need to do 2 passes over the container: +// map +// - first collect all keys (e.g. in k1) +// - for each key in stream, mark k1 that the key should not be removed +// - after updating map, do second pass and call delete for all keys in k1 which are not marked +// struct: +// - for each field, track the *typeInfo s1 +// - iterate through all s1, and for each one not marked, set value to zero +// - this involves checking the possible anonymous fields which are nil ptrs. +// too much work. +// +// ------------------------------------------ +// Error Handling is done within the library using panic. +// +// This way, the code doesn't have to keep checking if an error has happened, +// and we don't have to keep sending the error value along with each call +// or storing it in the En|Decoder and checking it constantly along the way. +// +// The disadvantage is that small functions which use panics cannot be inlined. +// The code accounts for that by only using panics behind an interface; +// since interface calls cannot be inlined, this is irrelevant. +// +// We considered storing the error is En|Decoder. +// - once it has its err field set, it cannot be used again. +// - panicing will be optional, controlled by const flag. +// - code should always check error first and return early. +// We eventually decided against it as it makes the code clumsier to always +// check for these error conditions. + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +const ( + scratchByteArrayLen = 32 + initCollectionCap = 32 // 32 is defensive. 16 is preferred. + + // Support encoding.(Binary|Text)(Unm|M)arshaler. + // This constant flag will enable or disable it. + supportMarshalInterfaces = true + + // Each Encoder or Decoder uses a cache of functions based on conditionals, + // so that the conditionals are not run every time. + // + // Either a map or a slice is used to keep track of the functions. + // The map is more natural, but has a higher cost than a slice/array. + // This flag (useMapForCodecCache) controls which is used. + // + // From benchmarks, slices with linear search perform better with < 32 entries. + // We have typically seen a high threshold of about 24 entries. + useMapForCodecCache = false + + // for debugging, set this to false, to catch panic traces. + // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. + recoverPanicToErr = true + + // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first. 
+ // Only concern is that, if the slice already contained some garbage, we will decode into that garbage. + // The chances of this are slim, so leave this "optimization". + // TODO: should this be true, to ensure that we always decode into a "zero" "empty" value? + resetSliceElemToZeroValue bool = false +) + +var ( + oneByteArr = [1]byte{0} + zeroByteSlice = oneByteArr[:0:0] +) + +type charEncoding uint8 + +const ( + c_RAW charEncoding = iota + c_UTF8 + c_UTF16LE + c_UTF16BE + c_UTF32LE + c_UTF32BE +) + +// valueType is the stream type +type valueType uint8 + +const ( + valueTypeUnset valueType = iota + valueTypeNil + valueTypeInt + valueTypeUint + valueTypeFloat + valueTypeBool + valueTypeString + valueTypeSymbol + valueTypeBytes + valueTypeMap + valueTypeArray + valueTypeTimestamp + valueTypeExt + + // valueTypeInvalid = 0xff +) + +type seqType uint8 + +const ( + _ seqType = iota + seqTypeArray + seqTypeSlice + seqTypeChan +) + +// note that containerMapStart and containerArraySend are not sent. +// This is because the ReadXXXStart and EncodeXXXStart already does these. +type containerState uint8 + +const ( + _ containerState = iota + + containerMapStart // slot left open, since Driver method already covers it + containerMapKey + containerMapValue + containerMapEnd + containerArrayStart // slot left open, since Driver methods already cover it + containerArrayElem + containerArrayEnd +) + +// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo +type sfiIdx struct { + name string + index int +} + +// do not recurse if a containing type refers to an embedded type +// which refers back to its containing type (via a pointer). +// The second time this back-reference happens, break out, +// so as not to cause an infinite loop. +const rgetMaxRecursion = 2 + +// Anecdotally, we believe most types have <= 12 fields. +// Java's PMD rules set TooManyFields threshold to 15. 
+const rgetPoolTArrayLen = 12 + +type rgetT struct { + fNames []string + encNames []string + etypes []uintptr + sfis []*structFieldInfo +} + +type rgetPoolT struct { + fNames [rgetPoolTArrayLen]string + encNames [rgetPoolTArrayLen]string + etypes [rgetPoolTArrayLen]uintptr + sfis [rgetPoolTArrayLen]*structFieldInfo + sfiidx [rgetPoolTArrayLen]sfiIdx +} + +var rgetPool = sync.Pool{ + New: func() interface{} { return new(rgetPoolT) }, +} + +type containerStateRecv interface { + sendContainerState(containerState) +} + +// mirror json.Marshaler and json.Unmarshaler here, +// so we don't import the encoding/json package +type jsonMarshaler interface { + MarshalJSON() ([]byte, error) +} +type jsonUnmarshaler interface { + UnmarshalJSON([]byte) error +} + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + + binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + + textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() + jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() + + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + + uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() + rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() + rawTypId = reflect.ValueOf(rawTyp).Pointer() + intfTypId = reflect.ValueOf(intfTyp).Pointer() + timeTypId = reflect.ValueOf(timeTyp).Pointer() + stringTypId = reflect.ValueOf(stringTyp).Pointer() + + mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() + mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() + intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() + // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() + + intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + chkOvf checkOverflow + + noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo") +) + +var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) + +// Selfer defines methods by which a value can encode or decode itself. +// +// Any type which implements Selfer will be able to encode or decode itself. +// Consequently, during (en|de)code, this takes precedence over +// (text|binary)(M|Unm)arshal or extension support. +type Selfer interface { + CodecEncodeSelf(*Encoder) + CodecDecodeSelf(*Decoder) +} + +// MapBySlice represents a slice which should be encoded as a map in the stream. +// The slice contains a sequence of key-value pairs. +// This affords storing a map in a specific sequence in the stream. 
+// +// The support of MapBySlice affords the following: +// - A slice type which implements MapBySlice will be encoded as a map +// - A slice can be decoded from a map in the stream +type MapBySlice interface { + MapBySlice() +} + +// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// +// BasicHandle encapsulates the common options and extension functions. +type BasicHandle struct { + // TypeInfos is used to get the type info for any type. + // + // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json + TypeInfos *TypeInfos + + extHandle + EncodeOptions + DecodeOptions +} + +func (x *BasicHandle) getBasicHandle() *BasicHandle { + return x +} + +func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + if x.TypeInfos != nil { + return x.TypeInfos.get(rtid, rt) + } + return defTypeInfos.get(rtid, rt) +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + getBasicHandle() *BasicHandle + newEncDriver(w *Encoder) encDriver + newDecDriver(r *Decoder) decDriver + isBinary() bool +} + +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and store the raw bytes during decode. +// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set. +type Raw []byte + +// RawExt represents raw unprocessed extension data. +// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. +// +// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value. +type RawExt struct { + Tag uint64 + // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value. + // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types + Data []byte + // Value represents the extension, if Data is nil. + // Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. + Value interface{} +} + +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { + // WriteExt converts a value to a []byte. + // + // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. + WriteExt(v interface{}) []byte + + // ReadExt updates a value from a []byte. + ReadExt(dst interface{}, src []byte) +} + +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. +type InterfaceExt interface { + // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64. + // + // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. + ConvertExt(v interface{}) interface{} + + // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time. + UpdateExt(dst interface{}, src interface{}) +} + +// Ext handles custom (de)serialization of custom types / extensions. 
+type Ext interface { + BytesExt + InterfaceExt +} + +// addExtWrapper is a wrapper implementation to support former AddExt exported method. +type addExtWrapper struct { + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +func (x addExtWrapper) WriteExt(v interface{}) []byte { + bs, err := x.encFn(reflect.ValueOf(v)) + if err != nil { + panic(err) + } + return bs +} + +func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { + if err := x.decFn(reflect.ValueOf(v), bs); err != nil { + panic(err) + } +} + +func (x addExtWrapper) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) +} + +func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { + x.ReadExt(dest, v.([]byte)) +} + +type setExtWrapper struct { + b BytesExt + i InterfaceExt +} + +func (x *setExtWrapper) WriteExt(v interface{}) []byte { + if x.b == nil { + panic("BytesExt.WriteExt is not supported") + } + return x.b.WriteExt(v) +} + +func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) { + if x.b == nil { + panic("BytesExt.WriteExt is not supported") + + } + x.b.ReadExt(v, bs) +} + +func (x *setExtWrapper) ConvertExt(v interface{}) interface{} { + if x.i == nil { + panic("InterfaceExt.ConvertExt is not supported") + + } + return x.i.ConvertExt(v) +} + +func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) { + if x.i == nil { + panic("InterfaceExxt.UpdateExt is not supported") + + } + x.i.UpdateExt(dest, v) +} + +// type errorString string +// func (x errorString) Error() string { return string(x) } + +type binaryEncodingType struct{} + +func (_ binaryEncodingType) isBinary() bool { return true } + +type textEncodingType struct{} + +func (_ textEncodingType) isBinary() bool { return false } + +// noBuiltInTypes is embedded into many types which do not support builtins +// e.g. msgpack, simple, cbor. +type noBuiltInTypes struct{} + +func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool { return false } +func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} +func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} + +type noStreamingCodec struct{} + +func (_ noStreamingCodec) CheckBreak() bool { return false } + +// bigenHelper. +// Users must already slice the x completely, because we will not reslice. +type bigenHelper struct { + x []byte // must be correctly sliced to appropriate len. slicing is a cost. + w encWriter +} + +func (z bigenHelper) writeUint16(v uint16) { + bigen.PutUint16(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint32(v uint32) { + bigen.PutUint32(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint64(v uint64) { + bigen.PutUint64(z.x, v) + z.w.writeb(z.x) +} + +type extTypeTagFn struct { + rtid uintptr + rt reflect.Type + tag uint64 + ext Ext +} + +type extHandle []extTypeTagFn + +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. +// +// AddExt registes an encode and decode function for a reflect.Type. +// AddExt internally calls SetExt. +// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. +func (o *extHandle) AddExt( + rt reflect.Type, tag byte, + encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error, +) (err error) { + if encfn == nil || decfn == nil { + return o.SetExt(rt, uint64(tag), nil) + } + return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) +} + +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. 
+// +// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To Deregister an ext, call SetExt with nil Ext +func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + rtid := reflect.ValueOf(rt).Pointer() + for _, v := range *o { + if v.rtid == rtid { + v.tag, v.ext = tag, ext + return + } + } + + if *o == nil { + *o = make([]extTypeTagFn, 0, 4) + } + *o = append(*o, extTypeTagFn{rtid, rt, tag, ext}) + return +} + +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + var v *extTypeTagFn + for i := range o { + v = &o[i] + if v.rtid == rtid { + return v + } + } + return nil +} + +func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { + var v *extTypeTagFn + for i := range o { + v = &o[i] + if v.tag == tag { + return v + } + } + return nil +} + +type structFieldInfo struct { + encName string // encode name + fieldName string // field name + + // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. + + is []int // (recursive/embedded) field index in struct + i int16 // field index in struct + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? +} + +// func (si *structFieldInfo) isZero() bool { +// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray +// } + +// rv returns the field of the struct. +// If anonymous, it returns an Invalid +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) { + if si.i != -1 { + v = v.Field(int(si.i)) + return v + } + // replicate FieldByIndex + for _, x := range si.is { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = v.Field(x) + } + return v +} + +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if si.i != -1 { + v = v.Field(int(si.i)) + v.Set(reflect.Zero(v.Type())) + // v.Set(reflect.New(v.Type()).Elem()) + // v.Set(reflect.New(v.Type())) + } else { + // replicate FieldByIndex + for _, x := range si.is { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return + } + v = v.Elem() + } + v = v.Field(x) + } + v.Set(reflect.Zero(v.Type())) + } +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + // if fname == "" { + // panic(noFieldNameToStructFieldInfoErr) + // } + si := structFieldInfo{ + encName: fname, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + if s == "omitempty" { + si.omitEmpty = true + } else if s == "toarray" { + si.toArray = true + } + } + } + } + // si.encNameBs = []byte(si.encName) + return &si +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +// typeInfo keeps information about each type referenced in the encode/decode sequence. 
+// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + rt reflect.Type + rtid uintptr + + numMeth uint16 // number of methods + + // baseId gives pointer to the base reflect.Type, after deferencing + // the pointers. E.g. base type of ***time.Time is time.Time. + base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + mbs bool // base type (T or *T) is a MapBySlice + + bm bool // base type (T or *T) is a binaryMarshaler + bunm bool // base type (T or *T) is a binaryUnmarshaler + bmIndir int8 // number of indirections to get to binaryMarshaler type + bunmIndir int8 // number of indirections to get to binaryUnmarshaler type + + tm bool // base type (T or *T) is a textMarshaler + tunm bool // base type (T or *T) is a textUnmarshaler + tmIndir int8 // number of indirections to get to textMarshaler type + tunmIndir int8 // number of indirections to get to textUnmarshaler type + + jm bool // base type (T or *T) is a jsonMarshaler + junm bool // base type (T or *T) is a jsonUnmarshaler + jmIndir int8 // number of indirections to get to jsonMarshaler type + junmIndir int8 // number of indirections to get to jsonUnmarshaler type + + cs bool // base type (T or *T) is a Selfer + csIndir int8 // number of indirections to get to Selfer type + + toArray bool // whether this (struct) type should be encoded as an array +} + +func (ti *typeInfo) indexForEncName(name string) int { + // NOTE: name may be a stringView, so don't pass it to another function. + //tisfi := ti.sfi + const binarySearchThreshold = 16 + if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { + // linear search. faster than binary search in my testing up to 16-field structs. + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + } else { + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + } + return -1 +} + +// TypeInfos caches typeInfo for each type on first inspection. +// +// It is configured with a set of tag keys, which are used to get +// configuration for the type. +type TypeInfos struct { + infos map[uintptr]*typeInfo + mu sync.RWMutex + tags []string +} + +// NewTypeInfos creates a TypeInfos given a set of struct tags keys. +// +// This allows users customize the struct tag keys which contain configuration +// of their types. +func NewTypeInfos(tags []string) *TypeInfos { + return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)} +} + +func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { + // check for tags: codec, json, in that order. + // this allows seamless support for many configured structs. 
+ for _, x := range x.tags { + s = t.Get(x) + if s != "" { + return s + } + } + return +} + +func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + var ok bool + x.mu.RLock() + pti, ok = x.infos[rtid] + x.mu.RUnlock() + if ok { + return + } + + // do not hold lock while computing this. + // it may lead to duplication, but that's ok. + ti := typeInfo{rt: rt, rtid: rtid} + ti.numMeth = uint16(rt.NumMethod()) + + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.bm, ti.bmIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.bunm, ti.bunmIndir = true, indir + } + if ok, indir = implementsIntf(rt, textMarshalerTyp); ok { + ti.tm, ti.tmIndir = true, indir + } + if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok { + ti.tunm, ti.tunmIndir = true, indir + } + if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok { + ti.jm, ti.jmIndir = true, indir + } + if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok { + ti.junm, ti.junmIndir = true, indir + } + if ok, indir = implementsIntf(rt, selferTyp); ok { + ti.cs, ti.csIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } + + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + ti.base = pt + ti.baseId = reflect.ValueOf(pt).Pointer() + ti.baseIndir = ptIndir + } + + if rt.Kind() == reflect.Struct { + var omitEmpty bool + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) + ti.toArray = siInfo.toArray + omitEmpty = siInfo.omitEmpty + } + pi := rgetPool.Get() + pv := pi.(*rgetPoolT) + pv.etypes[0] = ti.baseId + vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + x.rget(rt, rtid, omitEmpty, nil, &vv) + ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) + rgetPool.Put(pi) + } + // sfi = sfip + + x.mu.Lock() + if pti, ok = x.infos[rtid]; !ok { + pti = &ti + x.infos[rtid] = pti + } + x.mu.Unlock() + return +} + +func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, + indexstack []int, pv *rgetT, +) { + // Read up fields and store how to access the value. + // + // It uses go's rules for message selectors, + // which say that the field with the shallowest depth is selected. + // + // Note: we consciously use slices, not a map, to simulate a set. + // Typically, types have < 16 fields, + // and iteration using equals is faster than maps there + +LOOP: + for j, jlen := 0, rt.NumField(); j < jlen; j++ { + f := rt.Field(j) + fkind := f.Type.Kind() + // skip if a func type, or is unexported, or structTag value == "-" + switch fkind { + case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: + continue LOOP + } + + // if r1, _ := utf8.DecodeRuneInString(f.Name); + // r1 == utf8.RuneError || !unicode.IsUpper(r1) { + if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded + continue + } + stag := x.structTag(f.Tag) + if stag == "-" { + continue + } + var si *structFieldInfo + // if anonymous and no struct tag (or it's blank), + // and a struct (or pointer to struct), inline it. 
+ if f.Anonymous && fkind != reflect.Interface { + doInline := stag == "" + if !doInline { + si = parseStructFieldInfo("", stag) + doInline = si.encName == "" + // doInline = si.isZero() + } + if doInline { + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + // if etypes contains this, don't call rget again (as fields are already seen here) + ftid := reflect.ValueOf(ft).Pointer() + // We cannot recurse forever, but we need to track other field depths. + // So - we break if we see a type twice (not the first time). + // This should be sufficient to handle an embedded type that refers to its + // owning type, which then refers to its embedded type. + processIt := true + numk := 0 + for _, k := range pv.etypes { + if k == ftid { + numk++ + if numk == rgetMaxRecursion { + processIt = false + break + } + } + } + if processIt { + pv.etypes = append(pv.etypes, ftid) + indexstack2 := make([]int, len(indexstack)+1) + copy(indexstack2, indexstack) + indexstack2[len(indexstack)] = j + // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + x.rget(ft, ftid, omitEmpty, indexstack2, pv) + } + continue + } + } + } + + // after the anonymous dance: if an unexported field, skip + if f.PkgPath != "" { // unexported + continue + } + + if f.Name == "" { + panic(noFieldNameToStructFieldInfoErr) + } + + pv.fNames = append(pv.fNames, f.Name) + + if si == nil { + si = parseStructFieldInfo(f.Name, stag) + } else if si.encName == "" { + si.encName = f.Name + } + si.fieldName = f.Name + + pv.encNames = append(pv.encNames, si.encName) + + // si.ikind = int(f.Type.Kind()) + if len(indexstack) == 0 { + si.i = int16(j) + } else { + si.i = -1 + si.is = make([]int, len(indexstack)+1) + copy(si.is, indexstack) + si.is[len(indexstack)] = j + // si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + } + + if omitEmpty { + si.omitEmpty = true + } + pv.sfis = append(pv.sfis, si) + } +} + +// resolves the struct field info got from a call to rget. +// Returns a trimmed, unsorted and sorted []*structFieldInfo. +func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) { + var n int + for i, v := range x { + xn := v.encName //TODO: fieldName or encName? use encName for now. 
+ var found bool + for j, k := range pv { + if k.name == xn { + // one of them must be reset to nil, and the index updated appropriately to the other one + if len(v.is) == len(x[k.index].is) { + } else if len(v.is) < len(x[k.index].is) { + pv[j].index = i + if x[k.index] != nil { + x[k.index] = nil + n++ + } + } else { + if x[i] != nil { + x[i] = nil + n++ + } + } + found = true + break + } + } + if !found { + pv = append(pv, sfiIdx{xn, i}) + } + } + + // remove all the nils + y = make([]*structFieldInfo, len(x)-n) + n = 0 + for _, v := range x { + if v == nil { + continue + } + y[n] = v + n++ + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + return +} + +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + //debug.PrintStack() + panicValToErr(x, err) + } + } +} + +// func doPanic(tag string, format string, params ...interface{}) { +// params2 := make([]interface{}, len(params)+1) +// params2[0] = tag +// copy(params2[1:], params) +// panic(fmt.Errorf("%s: "+format, params2...)) +// } + +func isImmutableKind(k reflect.Kind) (v bool) { + return false || + k == reflect.Int || + k == reflect.Int8 || + k == reflect.Int16 || + k == reflect.Int32 || + k == reflect.Int64 || + k == reflect.Uint || + k == reflect.Uint8 || + k == reflect.Uint16 || + k == reflect.Uint32 || + k == reflect.Uint64 || + k == reflect.Uintptr || + k == reflect.Float32 || + k == reflect.Float64 || + k == reflect.Bool || + k == reflect.String +} + +// these functions must be inlinable, and not call anybody +type checkOverflow struct{} + +func (_ checkOverflow) Float32(f float64) (overflow bool) { + if f < 0 { + f = -f + } + return math.MaxFloat32 < f && f <= math.MaxFloat64 +} + +func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} + +func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} + +func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) { + //e.g. 
-127 to 128 for int8 + pos := (v >> 63) == 0 + ui2 := v & 0x7fffffffffffffff + if pos { + if ui2 > math.MaxInt64 { + overflow = true + return + } + } else { + if ui2 > math.MaxInt64-1 { + overflow = true + return + } + } + i = int64(v) + return +} + +// ------------------ SORT ----------------- + +func isNaN(f float64) bool { return f != f } + +// ----------------------- + +type intSlice []int64 +type uintSlice []uint64 +type floatSlice []float64 +type boolSlice []bool +type stringSlice []string +type bytesSlice [][]byte + +func (p intSlice) Len() int { return len(p) } +func (p intSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintSlice) Len() int { return len(p) } +func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatSlice) Len() int { return len(p) } +func (p floatSlice) Less(i, j int) bool { + return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) +} +func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringSlice) Len() int { return len(p) } +func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesSlice) Len() int { return len(p) } +func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 } +func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolSlice) Len() int { return len(p) } +func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] } +func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// --------------------- + +type intRv struct { + v int64 + r reflect.Value +} +type intRvSlice []intRv +type uintRv struct { + v uint64 + r reflect.Value +} +type uintRvSlice []uintRv +type floatRv struct { + v float64 + r reflect.Value +} +type floatRvSlice []floatRv +type boolRv struct { + v bool + r reflect.Value +} +type boolRvSlice []boolRv +type stringRv struct { + v string + r reflect.Value +} +type stringRvSlice []stringRv +type bytesRv struct { + v []byte + r reflect.Value +} +type bytesRvSlice []bytesRv + +func (p intRvSlice) Len() int { return len(p) } +func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintRvSlice) Len() int { return len(p) } +func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatRvSlice) Len() int { return len(p) } +func (p floatRvSlice) Less(i, j int) bool { + return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v) +} +func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringRvSlice) Len() int { return len(p) } +func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesRvSlice) Len() int { return len(p) } +func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolRvSlice) Len() int { return len(p) } +func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v } +func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type bytesI struct { + v []byte + i interface{} +} + +type bytesISlice []bytesI + +func (p bytesISlice) Len() int { return len(p) } +func (p bytesISlice) Less(i, j int) 
bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type set []uintptr + +func (s *set) add(v uintptr) (exists bool) { + // e.ci is always nil, or len >= 1 + // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }() + x := *s + if x == nil { + x = make([]uintptr, 1, 8) + x[0] = v + *s = x + return + } + // typically, length will be 1. make this perform. + if len(x) == 1 { + if j := x[0]; j == 0 { + x[0] = v + } else if j == v { + exists = true + } else { + x = append(x, v) + *s = x + } + return + } + // check if it exists + for _, j := range x { + if j == v { + exists = true + return + } + } + // try to replace a "deleted" slot + for i, j := range x { + if j == 0 { + x[i] = v + return + } + } + // if unable to replace deleted slot, just append it. + x = append(x, v) + *s = x + return +} + +func (s *set) remove(v uintptr) (exists bool) { + // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }() + x := *s + if len(x) == 0 { + return + } + if len(x) == 1 { + if x[0] == v { + x[0] = 0 + } + return + } + for i, j := range x { + if j == v { + exists = true + x[i] = 0 // set it to 0, as way to delete it. + // copy(x[i:], x[i+1:]) + // x = x[:len(x)-1] + return + } + } + return +} diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go new file mode 100644 index 000000000..5d0727f77 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_internal.go @@ -0,0 +1,242 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). + +import ( + "errors" + "fmt" + "math" + "reflect" +) + +func panicValToErr(panicVal interface{}, err *error) { + if panicVal == nil { + return + } + // case nil + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + return +} + +func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return hIsEmptyValue(v.Elem(), deref, checkStruct) + } else { + return v.IsNil() + } + case reflect.Struct: + if !checkStruct { + return false + } + // return true if all fields are empty. else return false. + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. 
+ // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !hIsEmptyValue(v.Field(i), deref, checkStruct) { + return false + } + } + return true + } + return false +} + +func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + return hIsEmptyValue(v, deref, checkStruct) +} + +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir + } + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? + if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 +} + +// validate that this function is correct ... +// culled from OGRE (Object-Oriented Graphics Rendering Engine) +// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) +func halfFloatToFloatBits(yy uint16) (d uint32) { + y := uint32(yy) + s := (y >> 15) & 0x01 + e := (y >> 10) & 0x1f + m := y & 0x03ff + + if e == 0 { + if m == 0 { // plu or minus 0 + return s << 31 + } else { // Denormalized number -- renormalize it + for (m & 0x00000400) == 0 { + m <<= 1 + e -= 1 + } + e += 1 + const zz uint32 = 0x0400 + m &= ^zz + } + } else if e == 31 { + if m == 0 { // Inf + return (s << 31) | 0x7f800000 + } else { // NaN + return (s << 31) | 0x7f800000 | (m << 13) + } + } + e = e + (127 - 15) + m = m << 13 + return (s << 31) | (e << 23) | m +} + +// GrowCap will return a new capacity for a slice, given the following: +// - oldCap: current capacity +// - unit: in-memory size of an element +// - num: number of elements to add +func growCap(oldCap, unit, num int) (newCap int) { + // appendslice logic (if cap < 1024, *2, else *1.25): + // leads to many copy calls, especially when copying bytes. + // bytes.Buffer model (2*cap + n): much better for bytes. + // smarter way is to take the byte-size of the appended element(type) into account + + // maintain 3 thresholds: + // t1: if cap <= t1, newcap = 2x + // t2: if cap <= t2, newcap = 1.75x + // t3: if cap <= t3, newcap = 1.5x + // else newcap = 1.25x + // + // t1, t2, t3 >= 1024 always. + // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) + // + // With this, appending for bytes increase by: + // 100% up to 4K + // 75% up to 8K + // 50% up to 16K + // 25% beyond that + + // unit can be 0 e.g. 
for struct{}{}; handle that appropriately + var t1, t2, t3 int // thresholds + if unit <= 1 { + t1, t2, t3 = 4*1024, 8*1024, 16*1024 + } else if unit < 16 { + t3 = 16 / unit * 1024 + t1 = t3 * 1 / 4 + t2 = t3 * 2 / 4 + } else { + t1, t2, t3 = 1024, 1024, 1024 + } + + var x int // temporary variable + + // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively + if oldCap <= t1 { // [0,t1] + x = 8 + } else if oldCap > t3 { // (t3,infinity] + x = 5 + } else if oldCap <= t2 { // (t1,t2] + x = 7 + } else { // (t2,t3] + x = 6 + } + newCap = x * oldCap / 4 + + if num > 0 { + newCap += num + } + + // ensure newCap is a multiple of 64 (if it is > 64) or 16. + if newCap > 64 { + if x = newCap % 64; x != 0 { + x = newCap / 64 + newCap = 64 * (x + 1) + } + } else { + if x = newCap % 16; x != 0 { + x = newCap / 16 + newCap = 16 * (x + 1) + } + } + return +} + +func expandSliceValue(s reflect.Value, num int) reflect.Value { + if num <= 0 { + return s + } + l0 := s.Len() + l1 := l0 + num // new slice length + if l1 < l0 { + panic("ExpandSlice: slice overflow") + } + c0 := s.Cap() + if l1 <= c0 { + return s.Slice(0, l1) + } + st := s.Type() + c1 := growCap(c0, int(st.Elem().Size()), num) + s2 := reflect.MakeSlice(st, l1, c1) + // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1) + reflect.Copy(s2, s) + return s2 +} diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go new file mode 100644 index 000000000..8b06a0045 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go @@ -0,0 +1,20 @@ +// +build !unsafe + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +func stringView(v []byte) string { + return string(v) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +func bytesView(v string) []byte { + return []byte(v) +} diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go new file mode 100644 index 000000000..0f596c71a --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go @@ -0,0 +1,49 @@ +// +build unsafe + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "unsafe" +) + +// This file has unsafe variants of some helper methods. + +type unsafeString struct { + Data uintptr + Len int +} + +type unsafeSlice struct { + Data uintptr + Len int + Cap int +} + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +func stringView(v []byte) string { + if len(v) == 0 { + return "" + } + + bx := (*unsafeSlice)(unsafe.Pointer(&v)) + sx := unsafeString{bx.Data, bx.Len} + return *(*string)(unsafe.Pointer(&sx)) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. 
+// In regular safe mode, it is an allocation and copy. +func bytesView(v string) []byte { + if len(v) == 0 { + return zeroByteSlice + } + + sx := (*unsafeString)(unsafe.Pointer(&v)) + bx := unsafeSlice{sx.Data, sx.Len, sx.Len} + return *(*[]byte)(unsafe.Pointer(&bx)) +} diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go new file mode 100644 index 000000000..5bb389628 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/json.go @@ -0,0 +1,1234 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// By default, this json support uses base64 encoding for bytes, because you cannot +// store and read any arbitrary string in json (only unicode). +// However, the user can configre how to encode/decode bytes. +// +// This library specifically supports UTF-8 for encoding and decoding only. +// +// Note that the library will happily encode/decode things which are not valid +// json e.g. a map[int64]string. We do it for consistency. With valid json, +// we will encode and decode appropriately. +// Users can specify their map type if necessary to force it. +// +// Note: +// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. +// We implement it here. +// - Also, strconv.ParseXXX for floats and integers +// - only works on strings resulting in unnecessary allocation and []byte-string conversion. +// - it does a lot of redundant checks, because json numbers are simpler that what it supports. +// - We parse numbers (floats and integers) directly here. +// We only delegate parsing floats if it is a hairy float which could cause a loss of precision. +// In that case, we delegate to strconv.ParseFloat. +// +// Note: +// - encode does not beautify. There is no whitespace when encoding. +// - rpc calls which take single integer arguments or write single numeric arguments will need care. + +// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver +// MUST not call one-another. + +import ( + "bytes" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "unicode/utf16" + "unicode/utf8" +) + +//-------------------------------- + +var ( + jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} + + jsonFloat64Pow10 = [...]float64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + 1e20, 1e21, 1e22, + } + + jsonUint64Pow10 = [...]uint64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + } + + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces string +) + +const ( + // jsonUnreadAfterDecNum controls whether we unread after decoding a number. + // + // instead of unreading, just update d.tok (iff it's not a whitespace char) + // However, doing this means that we may HOLD onto some data which belongs to another stream. + // Thus, it is safest to unread the data when done. + // keep behind a constant flag for now. + jsonUnreadAfterDecNum = true + + // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: + // - If we see first character of null, false or true, + // do not validate subsequent characters. + // - e.g. if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. 
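// Clarifying note (editor-added, not part of the vendored file): jsonLiterals above packs
// the three JSON keywords back to back, so the slices used throughout this file are:
//
//	jsonLiterals[0:4] == "true"    jsonLiterals[4:9] == "false"    jsonLiterals[9:13] == "null"
//
// Because the decoder has already consumed the first character as its lookahead token
// (d.tok), the readStrIdx calls seen later validate only the remainder:
// readStrIdx(1, 4) checks "rue", readStrIdx(5, 9) checks "alse", readStrIdx(10, 13) checks "ull".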
+ jsonValidateSymbols = true + + // if jsonTruncateMantissa, truncate mantissa if trailing 0's. + // This is important because it could allow some floats to be decoded without + // deferring to strconv.ParseFloat. + jsonTruncateMantissa = true + + // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow + jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) + + // if mantissa >= jsonNumUintMaxVal, this is an overflow + jsonNumUintMaxVal = 1< 1<<53 || v < -(1<<53)) { + e.w.writen1('"') + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) + e.w.writen1('"') + return + } + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriver) EncodeUint(v uint64) { + if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 { + e.w.writen1('"') + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) + e.w.writen1('"') + return + } + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + if v := ext.ConvertExt(rv); v == nil { + e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + // only encodes re.Value (never re.Data) + if re.Value == nil { + e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *jsonEncDriver) EncodeArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriver) EncodeMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { + // e.w.writestr(strconv.Quote(v)) + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeSymbol(v string) { + // e.EncodeString(c_UTF8, v) + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if c == c_RAW && e.se.i != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + if c == c_RAW { + slen := base64.StdEncoding.EncodedLen(len(v)) + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + base64.StdEncoding.Encode(e.bs, v) + e.w.writen1('"') + e.w.writeb(e.bs) + e.w.writen1('"') + } else { + // e.EncodeString(c, string(v)) + e.quoteStr(stringView(v)) + } +} + +func (e *jsonEncDriver) EncodeAsis(v []byte) { + e.w.writeb(v) +} + +func (e *jsonEncDriver) quoteStr(s string) { + // adapted from std pkg encoding/json + const hex = "0123456789abcdef" + w := e.w + w.writen1('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + w.writestr(s[start:i]) + } + switch b { + case '\\', '"': + w.writen2('\\', b) + case '\n': + w.writen2('\\', 'n') + case '\r': + w.writen2('\\', 'r') + case '\b': + w.writen2('\\', 'b') + case '\f': + w.writen2('\\', 'f') + case '\t': + w.writen2('\\', 't') + default: + // encode all bytes < 0x20 (except \r, \n). + // also encode < > & to prevent security holes when served to some browsers. + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. 
U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here. + if c == '\u2028' || c == '\u2029' { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\u202`) + w.writen1(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.writestr(s[start:]) + } + w.writen1('"') +} + +//-------------------------------- + +type jsonNum struct { + // bytes []byte // may have [+-.eE0-9] + mantissa uint64 // where mantissa ends, and maybe dot begins. + exponent int16 // exponent value. + manOverflow bool + neg bool // started with -. No initial sign in the bytes above. + dot bool // has dot + explicitExponent bool // explicit exponent +} + +func (x *jsonNum) reset() { + x.manOverflow = false + x.neg = false + x.dot = false + x.explicitExponent = false + x.mantissa = 0 + x.exponent = 0 +} + +// uintExp is called only if exponent > 0. +func (x *jsonNum) uintExp() (n uint64, overflow bool) { + n = x.mantissa + e := x.exponent + if e >= int16(len(jsonUint64Pow10)) { + overflow = true + return + } + n *= jsonUint64Pow10[e] + if n < x.mantissa || n > jsonNumUintMaxVal { + overflow = true + return + } + return + // for i := int16(0); i < e; i++ { + // if n >= jsonNumUintCutoff { + // overflow = true + // return + // } + // n *= 10 + // } + // return +} + +// these constants are only used withn floatVal. +// They are brought out, so that floatVal can be inlined. +const ( + jsonUint64MantissaBits = 52 + jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1 +) + +func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) { + // We do not want to lose precision. + // Consequently, we will delegate to strconv.ParseFloat if any of the following happen: + // - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits) + // We expect up to 99.... (19 digits) + // - The mantissa cannot fit into a 52 bits of uint64 + // - The exponent is beyond our scope ie beyong 22. + parseUsingStrConv = x.manOverflow || + x.exponent > jsonMaxExponent || + (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) || + x.mantissa>>jsonUint64MantissaBits != 0 + + if parseUsingStrConv { + return + } + + // all good. so handle parse here. + f = float64(x.mantissa) + // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f) + if x.neg { + f = -f + } + if x.exponent > 0 { + f *= jsonFloat64Pow10[x.exponent] + } else if x.exponent < 0 { + f /= jsonFloat64Pow10[-x.exponent] + } + return +} + +type jsonDecDriver struct { + noBuiltInTypes + d *Decoder + h *JsonHandle + r decReader + + c containerState + // tok is used to store the token read right after skipWhiteSpace. + tok uint8 + + bstr [8]byte // scratch used for string \UXXX parsing + b [64]byte // scratch, used for parsing strings or numbers + b2 [64]byte // scratch, used only for decodeBytes (after base64) + bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. + + se setExtWrapper + + n jsonNum +} + +func jsonIsWS(b byte) bool { + return b == ' ' || b == '\t' || b == '\r' || b == '\n' +} + +// // This will skip whitespace characters and return the next byte to read. +// // The next byte determines what the value will be one of. +// func (d *jsonDecDriver) skipWhitespace() { +// // fast-path: do not enter loop. Just check first (in case no whitespace). 
+// b := d.r.readn1() +// if jsonIsWS(b) { +// r := d.r +// for b = r.readn1(); jsonIsWS(b); b = r.readn1() { +// } +// } +// d.tok = b +// } + +func (d *jsonDecDriver) uncacheRead() { + if d.tok != 0 { + d.r.unreadn1() + d.tok = 0 + } +} + +func (d *jsonDecDriver) sendContainerState(c containerState) { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + var xc uint8 // char expected + if c == containerMapKey { + if d.c != containerMapStart { + xc = ',' + } + } else if c == containerMapValue { + xc = ':' + } else if c == containerMapEnd { + xc = '}' + } else if c == containerArrayElem { + if d.c != containerArrayStart { + xc = ',' + } + } else if c == containerArrayEnd { + xc = ']' + } + if xc != 0 { + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = c +} + +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok == '}' || d.tok == ']' { + // d.tok = 0 // only checking, not consuming + return true + } + return false +} + +func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) { + bs := d.r.readx(int(toIdx - fromIdx)) + d.tok = 0 + if jsonValidateSymbols { + if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) { + d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs) + return + } + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok == 'n' { + d.readStrIdx(10, 13) // ull + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() bool { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok == 'f' { + d.readStrIdx(5, 9) // alse + return false + } + if d.tok == 't' { + d.readStrIdx(1, 4) // rue + return true + } + d.d.errorf("json: decode bool: got first char %c", d.tok) + return false // "unreachable" +} + +func (d *jsonDecDriver) ReadMapStart() int { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok != '{' { + d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok) + } + d.tok = 0 + d.c = containerMapStart + return -1 +} + +func (d *jsonDecDriver) ReadArrayStart() int { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok != '[' { + d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok) + } + d.tok = 0 + d.c = containerArrayStart + return -1 +} + +func (d *jsonDecDriver) ContainerType() (vt valueType) { + // check container type by checking the first char + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if b := d.tok; b == '{' { + return valueTypeMap + } else if b == '[' { + return valueTypeArray + } else if b == 'n' { + return valueTypeNil + } else if b == '"' { + return valueTypeString + } + return valueTypeUnset + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // return false // "unreachable" +} + +func (d *jsonDecDriver) decNum(storeBytes bool) { + // If it is has a . or an e|E, decode as a float; else decode as an int. 
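// Clarifying note (editor-added, not part of the vendored file): d.tok caches the first
// non-whitespace byte of the next value (0 means "not primed yet"), which is why every
// entry point begins with the same skip-whitespace loop. decNum then stores the digits in
// the jsonNum form defined above; for example "2500" becomes mantissa 25 / exponent 2 once
// trailing zeros are truncated, and floatVal rebuilds it from the pow10 tables, while
// "1e30" exceeds the 1e22 table and is handed off to strconv.ParseFloat.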
+ if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + b := d.tok + var str bool + if b == '"' { + str = true + b = d.r.readn1() + } + if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) { + d.d.errorf("json: decNum: got first char '%c'", b) + return + } + d.tok = 0 + + const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) + const jsonNumUintMaxVal = 1<= jsonNumUintCutoff { + n.manOverflow = true + break + } + v := uint64(b - '0') + n.mantissa *= 10 + if v != 0 { + n1 := n.mantissa + v + if n1 < n.mantissa || n1 > jsonNumUintMaxVal { + n.manOverflow = true // n+v overflows + break + } + n.mantissa = n1 + } + case 6: + state = 7 + fallthrough + case 7: + if !(b == '0' && e == 0) { + e = e*10 + int16(b-'0') + } + default: + break LOOP + } + case '"': + if str { + if storeBytes { + d.bs = append(d.bs, '"') + } + b, eof = r.readn1eof() + } + break LOOP + default: + break LOOP + } + if storeBytes { + d.bs = append(d.bs, b) + } + b, eof = r.readn1eof() + } + + if jsonTruncateMantissa && n.mantissa != 0 { + for n.mantissa%10 == 0 { + n.mantissa /= 10 + n.exponent++ + } + } + + if e != 0 { + if eNeg { + n.exponent -= e + } else { + n.exponent += e + } + } + + // d.n = n + + if !eof { + if jsonUnreadAfterDecNum { + r.unreadn1() + } else { + if !jsonIsWS(b) { + d.tok = b + } + } + } + // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n", + // n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex) + return +} + +func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) { + d.decNum(false) + n := &d.n + if n.manOverflow { + d.d.errorf("json: overflow integer after: %v", n.mantissa) + return + } + var u uint64 + if n.exponent == 0 { + u = n.mantissa + } else if n.exponent < 0 { + d.d.errorf("json: fractional integer") + return + } else if n.exponent > 0 { + var overflow bool + if u, overflow = n.uintExp(); overflow { + d.d.errorf("json: overflow integer") + return + } + } + i = int64(u) + if n.neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) + return + } + // fmt.Printf("DecodeInt: %v\n", i) + return +} + +// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number +func (d *jsonDecDriver) floatVal() (f float64) { + f, useStrConv := d.n.floatVal() + if useStrConv { + var err error + if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil { + panic(fmt.Errorf("parse float: %s, %v", d.bs, err)) + } + if d.n.neg { + f = -f + } + } + return +} + +func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) { + d.decNum(false) + n := &d.n + if n.neg { + d.d.errorf("json: unsigned integer cannot be negative") + return + } + if n.manOverflow { + d.d.errorf("json: overflow integer after: %v", n.mantissa) + return + } + if n.exponent == 0 { + u = n.mantissa + } else if n.exponent < 0 { + d.d.errorf("json: fractional integer") + return + } else if n.exponent > 0 { + var overflow bool + if u, overflow = n.uintExp(); overflow { + d.d.errorf("json: overflow integer") + return + } + } + if chkOvf.Uint(u, bitsize) { + d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) + return + } + // fmt.Printf("DecodeUint: %v\n", u) + return +} + +func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + d.decNum(true) + f = d.floatVal() + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("json: overflow float32: %v, %s", f, d.bs) + return + } + return +} + +func (d *jsonDecDriver) DecodeExt(rv 
interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if ext == nil { + re := rv.(*RawExt) + re.Tag = xtag + d.d.decode(&re.Value) + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + return +} + +func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if !isstring && d.se.i != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } + d.appendStringAsBytes() + // if isstring, then just return the bytes, even if it is using the scratch buffer. + // the bytes will be converted to a string as needed. + if isstring { + return d.bs + } + // if appendStringAsBytes returned a zero-len slice, then treat as nil. + // This should only happen for null, and "". + if len(d.bs) == 0 { + return nil + } + bs0 := d.bs + slen := base64.StdEncoding.DecodedLen(len(bs0)) + if slen <= cap(bs) { + bsOut = bs[:slen] + } else if zerocopy && slen <= cap(d.b2) { + bsOut = d.b2[:slen] + } else { + bsOut = make([]byte, slen) + } + slen2, err := base64.StdEncoding.Decode(bsOut, bs0) + if err != nil { + d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err) + return nil + } + if slen != slen2 { + bsOut = bsOut[:slen2] + } + return +} + +func (d *jsonDecDriver) DecodeString() (s string) { + d.appendStringAsBytes() + // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key + if d.c == containerMapKey { + return d.d.string(d.bs) + } + return string(d.bs) +} + +func (d *jsonDecDriver) appendStringAsBytes() { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + + // handle null as a string + if d.tok == 'n' { + d.readStrIdx(10, 13) // ull + d.bs = d.bs[:0] + return + } + + if d.tok != '"' { + d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) + } + d.tok = 0 + + v := d.bs[:0] + var c uint8 + r := d.r + for { + c = r.readn1() + if c == '"' { + break + } else if c == '\\' { + c = r.readn1() + switch c { + case '"', '\\', '/', '\'': + v = append(v, c) + case 'b': + v = append(v, '\b') + case 'f': + v = append(v, '\f') + case 'n': + v = append(v, '\n') + case 'r': + v = append(v, '\r') + case 't': + v = append(v, '\t') + case 'u': + rr := d.jsonU4(false) + // fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr)) + if utf16.IsSurrogate(rr) { + rr = utf16.DecodeRune(rr, d.jsonU4(true)) + } + w2 := utf8.EncodeRune(d.bstr[:], rr) + v = append(v, d.bstr[:w2]...) + default: + d.d.errorf("json: unsupported escaped value: %c", c) + } + } else { + v = append(v, c) + } + } + d.bs = v +} + +func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune { + r := d.r + if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') { + d.d.errorf(`json: unquoteStr: invalid unicode sequence. 
Expecting \u`) + return 0 + } + // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64) + var u uint32 + for i := 0; i < 4; i++ { + v := r.readn1() + if '0' <= v && v <= '9' { + v = v - '0' + } else if 'a' <= v && v <= 'z' { + v = v - 'a' + 10 + } else if 'A' <= v && v <= 'Z' { + v = v - 'A' + 10 + } else { + d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v) + return 0 + } + u = u*16 + uint32(v) + } + return rune(u) +} + +func (d *jsonDecDriver) DecodeNaked() { + z := &d.d.n + // var decodeFurther bool + + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + switch d.tok { + case 'n': + d.readStrIdx(10, 13) // ull + z.v = valueTypeNil + case 'f': + d.readStrIdx(5, 9) // alse + z.v = valueTypeBool + z.b = false + case 't': + d.readStrIdx(1, 4) // rue + z.v = valueTypeBool + z.b = true + case '{': + z.v = valueTypeMap + // d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart + // decodeFurther = true + case '[': + z.v = valueTypeArray + // d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart + // decodeFurther = true + case '"': + z.v = valueTypeString + z.s = d.DecodeString() + default: // number + d.decNum(true) + n := &d.n + // if the string had a any of [.eE], then decode as float. + switch { + case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow: + z.v = valueTypeFloat + z.f = d.floatVal() + case n.exponent == 0: + u := n.mantissa + switch { + case n.neg: + z.v = valueTypeInt + z.i = -int64(u) + case d.h.SignedInteger: + z.v = valueTypeInt + z.i = int64(u) + default: + z.v = valueTypeUint + z.u = u + } + default: + u, overflow := n.uintExp() + switch { + case overflow: + z.v = valueTypeFloat + z.f = d.floatVal() + case n.neg: + z.v = valueTypeInt + z.i = -int64(u) + case d.h.SignedInteger: + z.v = valueTypeInt + z.i = int64(u) + default: + z.v = valueTypeUint + z.u = u + } + } + // fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v) + } + // if decodeFurther { + // d.s.sc.retryRead() + // } + return +} + +//---------------------- + +// JsonHandle is a handle for JSON encoding format. +// +// Json is comprehensively supported: +// - decodes numbers into interface{} as int, uint or float64 +// - configurable way to encode/decode []byte . +// by default, encodes and decodes []byte using base64 Std Encoding +// - UTF-8 support for encoding and decoding +// +// It has better performance than the json library in the standard library, +// by leveraging the performance improvements of the codec library and +// minimizing allocations. +// +// In addition, it doesn't read more bytes than necessary during a decode, which allows +// reading multiple values from a stream containing json and non-json content. +// For example, a user can read a json value, then a cbor value, then a msgpack value, +// all from the same stream in sequence. +type JsonHandle struct { + textEncodingType + BasicHandle + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. + RawBytesExt InterfaceExt + + // Indent indicates how a value is encoded. + // - If positive, indent by that number of spaces. + // - If negative, indent by that number of tabs. + Indent int8 + + // IntegerAsString controls how integers (signed and unsigned) are encoded. + // + // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. 
+ // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. + // This can be mitigated by configuring how to encode integers. + // + // IntegerAsString interpretes the following values: + // - if 'L', then encode integers > 2^53 as a json string. + // - if 'A', then encode all integers as a json string + // containing the exact integer representation as a decimal. + // - else encode all integers as a json number (default) + IntegerAsString uint8 +} + +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + +func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { + hd := jsonEncDriver{e: e, h: h} + hd.bs = hd.b[:0] + + hd.reset() + + return &hd +} + +func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { + // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} + hd := jsonDecDriver{d: d, h: h} + hd.bs = hd.b[:0] + hd.reset() + return &hd +} + +func (e *jsonEncDriver) reset() { + e.w = e.e.w + e.se.i = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } + e.d, e.dt, e.dl, e.ds = false, false, 0, "" + e.c = 0 + if e.h.Indent > 0 { + e.d = true + e.ds = jsonSpaces[:e.h.Indent] + } else if e.h.Indent < 0 { + e.d = true + e.dt = true + e.ds = jsonTabs[:-(e.h.Indent)] + } +} + +func (d *jsonDecDriver) reset() { + d.r = d.d.r + d.se.i = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + d.n.reset() +} + +var jsonEncodeTerminate = []byte{' '} + +func (h *JsonHandle) rpcEncodeTerminate() []byte { + return jsonEncodeTerminate +} + +var _ decDriver = (*jsonDecDriver)(nil) +var _ encDriver = (*jsonEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go new file mode 100644 index 000000000..e79830b56 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/msgpack.go @@ -0,0 +1,852 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. 
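// Usage sketch for the JsonHandle defined in json.go above (editor-added, not from the
// upstream sources; it assumes the package's exported NewEncoderBytes/NewDecoderBytes
// constructors and elides error handling):
//
//	var jh codec.JsonHandle
//	jh.IntegerAsString = 'L' // quote integers beyond 2^53 so precision survives in JavaScript
//
//	var buf []byte
//	_ = codec.NewEncoderBytes(&buf, &jh).Encode(map[string]uint64{"id": 1 << 60})
//	// buf should now hold {"id":"1152921504606846976"} -- the large integer is quoted.
//
//	var out map[string]uint64
//	_ = codec.NewDecoderBytes(buf, &jh).Decode(&out) // decNum accepts the quoted form back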
+ +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed + +*/ +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" + "reflect" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. +type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + noBuiltInTypes + encNoSeparator + e *Encoder + w encWriter + h *MsgpackHandle + x [8]byte +} + +func (e *msgpackEncDriver) EncodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) EncodeInt(i int64) { + if i >= 0 { + e.EncodeUint(uint64(i)) + } else if i >= -32 { + e.w.writen1(byte(i)) + } else if i >= math.MinInt8 { + e.w.writen2(mpInt8, byte(i)) + } else if i >= math.MinInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i >= math.MinInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + e.w.writen1(byte(i)) + } else if i <= math.MaxUint8 { + e.w.writen2(mpUint8, byte(i)) + } else if i <= math.MaxUint16 { + e.w.writen1(mpUint16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxUint32 { + e.w.writen1(mpUint32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpUint64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) EncodeFloat32(f float32) { + e.w.writen1(mpFloat) + 
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(v) + if bs == nil { + e.EncodeNil() + return + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeStringBytes(c_RAW, bs) + } +} + +func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + if l == 1 { + e.w.writen2(mpFixExt1, xtag) + } else if l == 2 { + e.w.writen2(mpFixExt2, xtag) + } else if l == 4 { + e.w.writen2(mpFixExt4, xtag) + } else if l == 8 { + e.w.writen2(mpFixExt8, xtag) + } else if l == 16 { + e.w.writen2(mpFixExt16, xtag) + } else if l < 256 { + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + } else if l < 65536 { + e.w.writen1(mpExt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + e.w.writen1(xtag) + } else { + e.w.writen1(mpExt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) EncodeArrayStart(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) EncodeMapStart(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(s)) + } else { + e.writeContainerLen(msgpackContainerStr, len(s)) + } + if len(s) > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) + } else { + e.writeContainerLen(msgpackContainerStr, len(bs)) + } + if len(bs) > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + if ct.hasFixMin && l < ct.fixCutoff { + e.w.writen1(ct.bFixMin | byte(l)) + } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) { + e.w.writen2(ct.b8, uint8(l)) + } else if l < 65536 { + e.w.writen1(ct.b16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + } else { + e.w.writen1(ct.b32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + d *Decoder + r decReader // *Decoder decReader decReaderT + h *MsgpackHandle + b [scratchByteArrayLen]byte + bd byte + bdRead bool + br bool // bytes reader + noBuiltInTypes + noStreamingCodec + decNoSeparator +} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. 
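// Worked example (editor-added, not from the upstream sources) of the byte layout the
// encoder methods above produce, following EncodeInt/EncodeUint and writeContainerLen:
//
//	EncodeInt(7)                -> 0x07                  (positive fixnum)
//	EncodeInt(-3)               -> 0xfd                  (negative fixnum, byte(-3))
//	EncodeInt(300)              -> 0xcd 0x01 0x2c        (mpUint16 + big-endian value)
//	EncodeMapStart(3)           -> 0x83                  (mpFixMapMin | 3)
//	EncodeArrayStart(20)        -> 0xdc 0x00 0x14        (mpArray16 + big-endian length)
//	EncodeString(c_UTF8, "abc") -> 0xa3 'a' 'b' 'c'      (mpFixStrMin | 3, then the bytes)
//
// DecodeNaked below reverses exactly this mapping when filling a nil interface{}.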
+func (d *msgpackDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + n := &d.d.n + var decodeFurther bool + + switch bd { + case mpNil: + n.v = valueTypeNil + d.bdRead = false + case mpFalse: + n.v = valueTypeBool + n.b = false + case mpTrue: + n.v = valueTypeBool + n.b = true + + case mpFloat: + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + case mpDouble: + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + + case mpUint8: + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) + case mpUint16: + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readx(8))) + + case mpInt8: + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) + case mpInt16: + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + n.v = valueTypeString + n.s = d.DecodeString() + } else { + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + n.v = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + n.v = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt + clen := d.readExtLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(clen) + default: + d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(bigen.Uint16(d.r.readx(2)))) + case mpUint32: + i = int64(uint64(bigen.Uint32(d.r.readx(4)))) + case mpUint64: + i = int64(bigen.Uint64(d.r.readx(8))) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + i = int64(bigen.Uint64(d.r.readx(8))) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + return + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (i << (64 - bitsize)) >> 
(64 - bitsize); i != trunc { + d.d.errorf("Overflow int value: %v", i) + return + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + ui = bigen.Uint64(d.r.readx(8)) + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt16: + if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt32: + if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt64: + if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + return + default: + d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + return + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + d.d.errorf("Overflow uint value: %v", ui) + return + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + f = float64(d.DecodeInt(0)) + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("msgpack: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. +func (d *msgpackDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFalse || d.bd == 0 { + // b = false + } else if d.bd == mpTrue || d.bd == 1 { + b = true + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + var clen int + // ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin + if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else { + clen = d.readContainerLen(msgpackContainerStr) + } + // println("DecodeBytes: clen: ", clen) + d.bdRead = false + // bytes may be nil, so handle it. if nil, clen=-1. 
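// Clarifying note (editor-added, not part of the vendored file): when zerocopy is
// requested and the decoder is reading from an in-memory byte slice (d.br), the content
// is returned as a sub-slice of the input with no copy; otherwise decByteSlice copies it
// into the caller's bs or the scratch buffer d.b.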
+ if clen < 0 { + return nil + } + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, bs) +} + +func (d *msgpackDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true, true)) +} + +func (d *msgpackDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *msgpackDecDriver) ContainerType() (vt valueType) { + bd := d.bd + if bd == mpNil { + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 || + (!d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) { + return valueTypeBytes + } else if d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) { + return valueTypeString + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + v = true + } + return +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + if bd == mpNil { + clen = -1 // to represent nil + } else if bd == ct.b8 { + clen = int(d.r.readn1()) + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readx(2))) + } else if bd == ct.b32 { + clen = int(bigen.Uint32(d.r.readx(4))) + } else if (ct.bFixMin & bd) == ct.bFixMin { + clen = int(ct.bFixMin ^ bd) + } else { + d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) ReadMapStart() int { + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) ReadArrayStart() int { + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(bigen.Uint16(d.r.readx(2))) + case mpExt32: + clen = int(bigen.Uint32(d.r.readx(4))) + default: + d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + return + } + return +} + +func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs = d.DecodeBytes(nil, false, true) + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs = 
d.DecodeBytes(nil, true, true) + } else { + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. + // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool + binaryEncodingType +} + +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { + return &msgpackEncDriver{e: e, w: e.w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { + return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes} +} + +func (e *msgpackEncDriver) reset() { + e.w = e.e.w +} + +func (d *msgpackDecDriver) reset() { + d.r = d.d.r + d.bd, d.bdRead = 0, false +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. + // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + + if c.isClosed() { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. 
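// Clarifying note (editor-added, not part of the vendored file): a msgpack-rpc message is
// a 4-element array [type, msgid, method-or-error, params-or-result], which is exactly
// what WriteRequest and WriteResponse above build. So fia below (0x94 == mpFixArrayMin | 4)
// is the descriptor for that array, and the three reads that follow pull out the type
// byte, the message id and the method name or error string, leaving the body in the
// stream for ReadRequestBody/ReadResponseBody.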
+ + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) + // return + // } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return + } + if b != fia { + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/noop.go b/vendor/github.com/ugorji/go/codec/noop.go new file mode 100644 index 000000000..cfee3d084 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/noop.go @@ -0,0 +1,213 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math/rand" + "time" +) + +// NoopHandle returns a no-op handle. It basically does nothing. +// It is only useful for benchmarking, as it gives an idea of the +// overhead from the codec framework. +// +// LIBRARY USERS: *** DO NOT USE *** +func NoopHandle(slen int) *noopHandle { + h := noopHandle{} + h.rand = rand.New(rand.NewSource(time.Now().UnixNano())) + h.B = make([][]byte, slen) + h.S = make([]string, slen) + for i := 0; i < len(h.S); i++ { + b := make([]byte, i+1) + for j := 0; j < len(b); j++ { + b[j] = 'a' + byte(i) + } + h.B[i] = b + h.S[i] = string(b) + } + return &h +} + +// noopHandle does nothing. +// It is used to simulate the overhead of the codec framework. +type noopHandle struct { + BasicHandle + binaryEncodingType + noopDrv // noopDrv is unexported here, so we can get a copy of it when needed. +} + +type noopDrv struct { + d *Decoder + e *Encoder + i int + S []string + B [][]byte + mks []bool // stack. if map (true), else if array (false) + mk bool // top of stack. what container are we on? map or array? + ct valueType // last response for IsContainerType. 
+ cb int // counter for ContainerType + rand *rand.Rand +} + +func (h *noopDrv) r(v int) int { return h.rand.Intn(v) } +func (h *noopDrv) m(v int) int { h.i++; return h.i % v } + +func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h } +func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h } + +func (h *noopDrv) reset() {} +func (h *noopDrv) uncacheRead() {} + +// --- encDriver + +// stack functions (for map and array) +func (h *noopDrv) start(b bool) { + // println("start", len(h.mks)+1) + h.mks = append(h.mks, b) + h.mk = b +} +func (h *noopDrv) end() { + // println("end: ", len(h.mks)-1) + h.mks = h.mks[:len(h.mks)-1] + if len(h.mks) > 0 { + h.mk = h.mks[len(h.mks)-1] + } else { + h.mk = false + } +} + +func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {} +func (h *noopDrv) EncodeNil() {} +func (h *noopDrv) EncodeInt(i int64) {} +func (h *noopDrv) EncodeUint(i uint64) {} +func (h *noopDrv) EncodeBool(b bool) {} +func (h *noopDrv) EncodeFloat32(f float32) {} +func (h *noopDrv) EncodeFloat64(f float64) {} +func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {} +func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) } +func (h *noopDrv) EncodeMapStart(length int) { h.start(false) } +func (h *noopDrv) EncodeEnd() { h.end() } + +func (h *noopDrv) EncodeString(c charEncoding, v string) {} +func (h *noopDrv) EncodeSymbol(v string) {} +func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {} + +func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {} + +// ---- decDriver +func (h *noopDrv) initReadNext() {} +func (h *noopDrv) CheckBreak() bool { return false } +func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false } +func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {} +func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) } +func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) } +func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) } +func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 } +func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] } + +// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) } + +func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] } + +func (h *noopDrv) ReadEnd() { h.end() } + +// toggle map/slice +func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) } +func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) } + +func (h *noopDrv) ContainerType() (vt valueType) { + // return h.m(2) == 0 + // handle kStruct, which will bomb is it calls this and doesn't get back a map or array. + // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2 + // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs) + // however, every 10th time it is called, we just return something else. 
+ var vals = [...]valueType{valueTypeArray, valueTypeMap} + // ------------ TAKE ------------ + // if h.cb%2 == 0 { + // if h.ct == valueTypeMap || h.ct == valueTypeArray { + // } else { + // h.ct = vals[h.m(2)] + // } + // } else if h.cb%5 == 0 { + // h.ct = valueType(h.m(8)) + // } else { + // h.ct = vals[h.m(2)] + // } + // ------------ TAKE ------------ + // if h.cb%16 == 0 { + // h.ct = valueType(h.cb % 8) + // } else { + // h.ct = vals[h.cb%2] + // } + h.ct = vals[h.cb%2] + h.cb++ + return h.ct + + // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes { + // return h.ct + // } + // return valueTypeUnset + // TODO: may need to tweak this so it works. + // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap { + // h.cb = !h.cb + // h.ct = vt + // return h.cb + // } + // // go in a loop and check it. + // h.ct = vt + // h.cb = h.m(7) == 0 + // return h.cb +} +func (h *noopDrv) TryDecodeAsNil() bool { + if h.mk { + return false + } else { + return h.m(8) == 0 + } +} +func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 { + return 0 +} + +func (h *noopDrv) DecodeNaked() { + // use h.r (random) not h.m() because h.m() could cause the same value to be given. + var sk int + if h.mk { + // if mapkey, do not support values of nil OR bytes, array, map or rawext + sk = h.r(7) + 1 + } else { + sk = h.r(12) + } + n := &h.d.n + switch sk { + case 0: + n.v = valueTypeNil + case 1: + n.v, n.b = valueTypeBool, false + case 2: + n.v, n.b = valueTypeBool, true + case 3: + n.v, n.i = valueTypeInt, h.DecodeInt(64) + case 4: + n.v, n.u = valueTypeUint, h.DecodeUint(64) + case 5: + n.v, n.f = valueTypeFloat, h.DecodeFloat(true) + case 6: + n.v, n.f = valueTypeFloat, h.DecodeFloat(false) + case 7: + n.v, n.s = valueTypeString, h.DecodeString() + case 8: + n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))] + case 9: + n.v = valueTypeArray + case 10: + n.v = valueTypeMap + default: + n.v = valueTypeExt + n.u = h.DecodeUint(64) + n.l = h.B[h.m(len(h.B))] + } + h.ct = n.v + return +} diff --git a/vendor/github.com/ugorji/go/codec/prebuild.go b/vendor/github.com/ugorji/go/codec/prebuild.go new file mode 100644 index 000000000..2353263e8 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/prebuild.go @@ -0,0 +1,3 @@ +package codec + +//go:generate bash prebuild.sh diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go new file mode 100644 index 000000000..8062bed31 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/rpc.go @@ -0,0 +1,180 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bufio" + "io" + "net/rpc" + "sync" +) + +// rpcEncodeTerminator allows a handler specify a []byte terminator to send after each Encode. +// +// Some codecs like json need to put a space after each encoded value, to serve as a +// delimiter for things like numbers (else json codec will continue reading till EOF). +type rpcEncodeTerminator interface { + rpcEncodeTerminate() []byte +} + +// Rpc provides a rpc Server or Client Codec for rpc communication. +type Rpc interface { + ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec + ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec +} + +// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer +// used by the rpc connection. 
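// Clarifying note (editor-added, not part of the vendored file) on rpcEncodeTerminator
// above: for the json handle the terminator is a single space, so encoding the numbers 12
// and then 34 back to back yields "12 34" rather than "1234", which a streaming decoder
// could not split.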
It accommodates use-cases where the connection +// should be used by rpc and non-rpc functions, e.g. streaming a file after +// sending an rpc response. +type RpcCodecBuffered interface { + BufferedReader() *bufio.Reader + BufferedWriter() *bufio.Writer +} + +// ------------------------------------- + +// rpcCodec defines the struct members and common methods. +type rpcCodec struct { + rwc io.ReadWriteCloser + dec *Decoder + enc *Encoder + bw *bufio.Writer + br *bufio.Reader + mu sync.Mutex + h Handle + + cls bool + clsmu sync.RWMutex +} + +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + bw := bufio.NewWriter(conn) + br := bufio.NewReader(conn) + return rpcCodec{ + rwc: conn, + bw: bw, + br: br, + enc: NewEncoder(bw, h), + dec: NewDecoder(br, h), + h: h, + } +} + +func (c *rpcCodec) BufferedReader() *bufio.Reader { + return c.br +} + +func (c *rpcCodec) BufferedWriter() *bufio.Writer { + return c.bw +} + +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { + if c.isClosed() { + return io.EOF + } + if err = c.enc.Encode(obj1); err != nil { + return + } + t, tOk := c.h.(rpcEncodeTerminator) + if tOk { + c.bw.Write(t.rpcEncodeTerminate()) + } + if writeObj2 { + if err = c.enc.Encode(obj2); err != nil { + return + } + if tOk { + c.bw.Write(t.rpcEncodeTerminate()) + } + } + if doFlush { + return c.bw.Flush() + } + return +} + +func (c *rpcCodec) read(obj interface{}) (err error) { + if c.isClosed() { + return io.EOF + } + //If nil is passed in, we should still attempt to read content to nowhere. + if obj == nil { + var obj2 interface{} + return c.dec.Decode(&obj2) + } + return c.dec.Decode(obj) +} + +func (c *rpcCodec) isClosed() bool { + c.clsmu.RLock() + x := c.cls + c.clsmu.RUnlock() + return x +} + +func (c *rpcCodec) Close() error { + if c.isClosed() { + return io.EOF + } + c.clsmu.Lock() + c.cls = true + c.clsmu.Unlock() + return c.rwc.Close() +} + +func (c *rpcCodec) ReadResponseBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +type goRpcCodec struct { + rpcCodec +} + +func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // Must protect for concurrent access as per API + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +// goRpc is the implementation of Rpc that uses the communication protocol +// as defined in net/rpc package. +type goRpc struct{} + +// GoRpc implements Rpc using the communication protocol defined in net/rpc package. +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. 
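// Usage sketch (editor-added, not from the upstream sources; the address is a placeholder
// and error handling is elided):
//
//	conn, _ := net.Dial("tcp", "server:9999")
//	var mh codec.MsgpackHandle
//
//	// Go-to-Go RPC, framed with msgpack via GoRpc (declared just below):
//	client := rpc.NewClientWithCodec(codec.GoRpc.ClientCodec(conn, &mh))
//	var reply string
//	_ = client.Call("Service.Method", "arg", &reply)
//
//	// For interop with non-Go peers, use the msgpack-rpc spec codec instead:
//	client = rpc.NewClientWithCodec(codec.MsgpackSpecRpc.ClientCodec(conn, &mh))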
+var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go new file mode 100644 index 000000000..d07208c87 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/simple.go @@ -0,0 +1,526 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" +) + +const ( + _ uint8 = iota + simpleVdNil = 1 + simpleVdFalse = 2 + simpleVdTrue = 3 + simpleVdFloat32 = 4 + simpleVdFloat64 = 5 + + // each lasts for 4 (ie n, n+1, n+2, n+3) + simpleVdPosInt = 8 + simpleVdNegInt = 12 + + // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) + simpleVdString = 216 + simpleVdByteArray = 224 + simpleVdArray = 232 + simpleVdMap = 240 + simpleVdExt = 248 +) + +type simpleEncDriver struct { + noBuiltInTypes + encNoSeparator + e *Encoder + h *SimpleHandle + w encWriter + b [8]byte +} + +func (e *simpleEncDriver) EncodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriver) EncodeFloat32(f float32) { + e.w.writen1(simpleVdFloat32) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *simpleEncDriver) EncodeFloat64(f float64) { + e.w.writen1(simpleVdFloat64) + bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *simpleEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriver) EncodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriver) encUint(v uint64, bd uint8) { + if v <= math.MaxUint8 { + e.w.writen2(bd, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 1) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 2) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 3) + bigenHelper{e.b[:8], e.w}.writeUint64(v) + } +} + +func (e *simpleEncDriver) encLen(bd byte, length int) { + if length == 0 { + e.w.writen1(bd) + } else if length <= math.MaxUint8 { + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) + } else if length <= math.MaxUint16 { + e.w.writen1(bd + 2) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length)) + } else if int64(length) <= math.MaxUint32 { + e.w.writen1(bd + 3) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length)) + } else { + e.w.writen1(bd + 4) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length)) + } +} + +func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + e.w.writen1(xtag) +} + +func (e 
*simpleEncDriver) EncodeArrayStart(length int) { + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriver) EncodeMapStart(length int) { + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriver) EncodeString(c charEncoding, v string) { + e.encLen(simpleVdString, len(v)) + e.w.writestr(v) +} + +func (e *simpleEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +//------------------------------------ + +type simpleDecDriver struct { + d *Decoder + h *SimpleHandle + r decReader + bdRead bool + bd byte + br bool // bytes reader + noBuiltInTypes + noStreamingCodec + decNoSeparator + b [scratchByteArrayLen]byte +} + +func (d *simpleDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *simpleDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *simpleDecDriver) ContainerType() (vt valueType) { + if d.bd == simpleVdNil { + return valueTypeNil + } else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 || + d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 { + return valueTypeBytes + } else if d.bd == simpleVdString || d.bd == simpleVdString+1 || + d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 { + return valueTypeString + } else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 || + d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 { + return valueTypeArray + } else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 || + d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *simpleDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false +} + +func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + case simpleVdPosInt + 1: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case simpleVdPosInt + 2: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case simpleVdPosInt + 3: + ui = uint64(bigen.Uint64(d.r.readx(8))) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + neg = true + case simpleVdNegInt + 1: + ui = uint64(bigen.Uint16(d.r.readx(2))) + neg = true + case simpleVdNegInt + 2: + ui = uint64(bigen.Uint32(d.r.readx(4))) + neg = true + case simpleVdNegInt + 3: + ui = uint64(bigen.Uint64(d.r.readx(8))) + neg = true + default: + d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + return + } + // don't do this check, because callers may only want the unsigned value. 
+ // if ui > math.MaxInt64 { + // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) + // return + // } + return +} + +func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) { + ui, neg := d.decCheckInteger() + i, overflow := chkOvf.SignedInt(ui) + if overflow { + d.d.errorf("simple: overflow converting %v to signed integer", ui) + return + } + if neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("simple: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == simpleVdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { + f = float64(d.DecodeInt(64)) + } else { + d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + return + } + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("msgpack: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *simpleDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdTrue { + b = true + } else if d.bd == simpleVdFalse { + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) ReadMapStart() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) ReadArrayStart() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) decLen() int { + switch d.bd % 8 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(bigen.Uint16(d.r.readx(2))) + case 3: + ui := uint64(bigen.Uint32(d.r.readx(4))) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return 0 + } + return int(ui) + case 4: + ui := bigen.Uint64(d.r.readx(8)) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return 0 + } + return int(ui) + } + d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. 
Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true, true)) +} + +func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, bs) +} + +func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs = d.DecodeBytes(nil, false, true) + default: + d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := &d.d.n + var decodeFurther bool + + switch d.bd { + case simpleVdNil: + n.v = valueTypeNil + case simpleVdFalse: + n.v = valueTypeBool + n.b = false + case simpleVdTrue: + n.v = valueTypeBool + n.b = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt(64) + } else { + n.v = valueTypeUint + n.u = d.DecodeUint(64) + } + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + n.v = valueTypeInt + n.i = d.DecodeInt(64) + case simpleVdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat(true) + case simpleVdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat(false) + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + n.v = valueTypeString + n.s = d.DecodeString() + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + n.v = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle for a very simple encoding format. 
+// +// simple is a simplistic codec similar to binc, but not as compact. +// - Encoding of a value is always preceded by the descriptor byte (bd) +// - True, false, nil are encoded fully in 1 byte (the descriptor) +// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). +// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. +// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) +// - Lenght of containers (strings, bytes, array, map, extensions) +// are encoded in 0, 1, 2, 4 or 8 bytes. +// Zero-length containers have no length encoded. +// For others, the number of bytes is given by pow(2, bd%3) +// - maps are encoded as [bd] [length] [[key][value]]... +// - arrays are encoded as [bd] [length] [value]... +// - extensions are encoded as [bd] [length] [tag] [byte]... +// - strings/bytearrays are encoded as [bd] [length] [byte]... +// +// The full spec will be published soon. +type SimpleHandle struct { + BasicHandle + binaryEncodingType +} + +func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { + return &simpleEncDriver{e: e, w: e.w, h: h} +} + +func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { + return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes} +} + +func (e *simpleEncDriver) reset() { + e.w = e.e.w +} + +func (d *simpleDecDriver) reset() { + d.r = d.d.r + d.bd, d.bdRead = 0, false +} + +var _ decDriver = (*simpleDecDriver)(nil) +var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/time.go b/vendor/github.com/ugorji/go/codec/time.go new file mode 100644 index 000000000..718b731ec --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/time.go @@ -0,0 +1,233 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "fmt" + "reflect" + "time" +) + +var ( + timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} + timeExtEncFn = func(rv reflect.Value) (bs []byte, err error) { + defer panicToErr(&err) + bs = timeExt{}.WriteExt(rv.Interface()) + return + } + timeExtDecFn = func(rv reflect.Value, bs []byte) (err error) { + defer panicToErr(&err) + timeExt{}.ReadExt(rv.Interface(), bs) + return + } +) + +type timeExt struct{} + +func (x timeExt) WriteExt(v interface{}) (bs []byte) { + switch v2 := v.(type) { + case time.Time: + bs = encodeTime(v2) + case *time.Time: + bs = encodeTime(*v2) + default: + panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2)) + } + return +} +func (x timeExt) ReadExt(v interface{}, bs []byte) { + tt, err := decodeTime(bs) + if err != nil { + panic(err) + } + *(v.(*time.Time)) = tt +} + +func (x timeExt) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) +} +func (x timeExt) UpdateExt(v interface{}, src interface{}) { + x.ReadExt(v, src.([]byte)) +} + +// EncodeTime encodes a time.Time as a []byte, including +// information on the instant in time and UTC offset. 
+// +// Format Description +// +// A timestamp is composed of 3 components: +// +// - secs: signed integer representing seconds since unix epoch +// - nsces: unsigned integer representing fractional seconds as a +// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 +// - tz: signed integer representing timezone offset in minutes east of UTC, +// and a dst (daylight savings time) flag +// +// When encoding a timestamp, the first byte is the descriptor, which +// defines which components are encoded and how many bytes are used to +// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it +// is not encoded in the byte array explicitly*. +// +// Descriptor 8 bits are of the form `A B C DDD EE`: +// A: Is secs component encoded? 1 = true +// B: Is nsecs component encoded? 1 = true +// C: Is tz component encoded? 1 = true +// DDD: Number of extra bytes for secs (range 0-7). +// If A = 1, secs encoded in DDD+1 bytes. +// If A = 0, secs is not encoded, and is assumed to be 0. +// If A = 1, then we need at least 1 byte to encode secs. +// DDD says the number of extra bytes beyond that 1. +// E.g. if DDD=0, then secs is represented in 1 byte. +// if DDD=2, then secs is represented in 3 bytes. +// EE: Number of extra bytes for nsecs (range 0-3). +// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) +// +// Following the descriptor bytes, subsequent bytes are: +// +// secs component encoded in `DDD + 1` bytes (if A == 1) +// nsecs component encoded in `EE + 1` bytes (if B == 1) +// tz component encoded in 2 bytes (if C == 1) +// +// secs and nsecs components are integers encoded in a BigEndian +// 2-complement encoding format. +// +// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to +// Least significant bit 0 are described below: +// +// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). +// Bit 15 = have\_dst: set to 1 if we set the dst flag. +// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. +// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. +// +func encodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// DecodeTime decodes a []byte into a time.Time. 
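+//
+// As a rough worked example of the descriptor layout above: a UTC time whose
+// seconds value needs 5 bytes and whose nanoseconds value needs all 4 bytes is
+// encoded with descriptor byte 0xD3 (A=1, B=1, C=0, DDD=4, EE=3), followed by
+// the 5 secs bytes and the 4 nsecs bytes, for 10 bytes in total; the tz
+// component is omitted because the location is UTC.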
+func decodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printed. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + //tzname[3] = '-' (TODO: verify. this works here) + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +func timeLocUTCName(tzint int16) string { + if tzint == 0 { + return "UTC" + } + var tzname = []byte("UTC+00:00") + //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. + //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first + var tzhr, tzmin int16 + if tzint < 0 { + tzname[3] = '-' // (TODO: verify. this works here) + tzhr, tzmin = -tzint/60, (-tzint)%60 + } else { + tzhr, tzmin = tzint/60, tzint%60 + } + tzname[4] = timeDigits[tzhr/10] + tzname[5] = timeDigits[tzhr%10] + tzname[7] = timeDigits[tzmin/10] + tzname[8] = timeDigits[tzmin%10] + return string(tzname) + //return time.FixedZone(string(tzname), int(tzint)*60) +} diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 000000000..69a4ac7ee --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,106 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. 
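+//
+// A minimal client sketch using NewConfig and DialConfig; the URLs are
+// placeholders:
+//
+//	config, err := websocket.NewConfig("ws://example.com/echo", "http://example.com/")
+//	if err != nil {
+//		// handle error
+//	}
+//	ws, err := websocket.DialConfig(config)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer ws.Close()
+//	websocket.Message.Send(ws, "hello")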
+func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. +func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. +func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. +func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + dialer := config.Dialer + if dialer == nil { + dialer = &net.Dialer{} + } + client, err = dialWithDialer(dialer, config) + if err != nil { + goto Error + } + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go new file mode 100644 index 000000000..2dab943a4 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "net" +) + +func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { + switch config.Location.Scheme { + case "ws": + conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + return +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 000000000..8cffdd16c --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,583 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. 
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. +type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. +type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. 
+ hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) + frame.writer.Write(header) + data := make([]byte, length) + for i := range data { + data[i] = msg[i] ^ frame.header.MaskingKey[i%4] + } + frame.writer.Write(data) + err = frame.writer.Flush() + return length, err + } + frame.writer.Write(header) + frame.writer.Write(msg) + err = frame.writer.Flush() + return length, err +} + +func (frame *hybiFrameWriter) Close() error { return nil } + +type hybiFrameWriterFactory struct { + *bufio.Writer + needMaskingKey bool +} + +func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} + if buf.needMaskingKey { + frameHeader.MaskingKey, err = generateMaskingKey() + if err != nil { + return nil, err + } + } + return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil +} + +type hybiFrameHandler struct { + conn *Conn + payloadType byte +} + +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { + if handler.conn.IsServerConn() { + // The client MUST mask all frames sent to the server. + if frame.(*hybiFrameReader).header.MaskingKey == nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } else { + // The server MUST NOT mask all frames. 
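+		// (RFC 6455, Section 5.1: a server must not mask any frame it sends
+		// to the client, so a masked frame from the server is a protocol error.)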
+ if frame.(*hybiFrameReader).header.MaskingKey != nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } + if header := frame.HeaderReader(); header != nil { + io.Copy(ioutil.Discard, header) + } + switch frame.PayloadType() { + case ContinuationFrame: + frame.(*hybiFrameReader).header.OpCode = handler.payloadType + case TextFrame, BinaryFrame: + handler.payloadType = frame.PayloadType() + case CloseFrame: + return nil, io.EOF + case PingFrame, PongFrame: + b := make([]byte, maxControlFramePayloadLength) + n, err := io.ReadFull(frame, b) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + io.Copy(ioutil.Discard, frame) + if frame.PayloadType() == PingFrame { + if _, err := handler.WritePong(b[:n]); err != nil { + return nil, err + } + } + return nil, nil + } + return frame, nil +} + +func (handler *hybiFrameHandler) WriteClose(status int) (err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame) + if err != nil { + return err + } + msg := make([]byte, 2) + binary.BigEndian.PutUint16(msg, uint16(status)) + _, err = w.Write(msg) + w.Close() + return err +} + +func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// newHybiConn creates a new WebSocket connection speaking hybi draft protocol. +func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + if buf == nil { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + buf = bufio.NewReadWriter(br, bw) + } + ws := &Conn{config: config, request: request, buf: buf, rwc: rwc, + frameReaderFactory: hybiFrameReaderFactory{buf.Reader}, + frameWriterFactory: hybiFrameWriterFactory{ + buf.Writer, request == nil}, + PayloadType: TextFrame, + defaultCloseStatus: closeStatusNormal} + ws.frameHandler = &hybiFrameHandler{conn: ws} + return ws +} + +// generateMaskingKey generates a masking key for a frame. +func generateMaskingKey() (maskingKey []byte, err error) { + maskingKey = make([]byte, 4) + if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil { + return + } + return +} + +// generateNonce generates a nonce consisting of a randomly selected 16-byte +// value that has been base64-encoded. +func generateNonce() (nonce []byte) { + key := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + panic(err) + } + nonce = make([]byte, 24) + base64.StdEncoding.Encode(nonce, key) + return +} + +// removeZone removes IPv6 zone identifer from host. +// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" +func removeZone(host string) string { + if !strings.HasPrefix(host, "[") { + return host + } + i := strings.LastIndex(host, "]") + if i < 0 { + return host + } + j := strings.LastIndex(host[:i], "%") + if j < 0 { + return host + } + return host[:j] + host[i:] +} + +// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of +// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string. 
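+//
+// For instance, with the example values from RFC 6455, Section 1.3: the client
+// key "dGhlIHNhbXBsZSBub25jZQ==" produces the accept value
+// "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".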
+func getNonceAccept(nonce []byte) (expected []byte, err error) { + h := sha1.New() + if _, err = h.Write(nonce); err != nil { + return + } + if _, err = h.Write([]byte(websocketGUID)); err != nil { + return + } + expected = make([]byte, 28) + base64.StdEncoding.Encode(expected, h.Sum(nil)) + return +} + +// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17 +func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { + bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") + bw.WriteString("Upgrade: websocket\r\n") + bw.WriteString("Connection: Upgrade\r\n") + nonce := generateNonce() + if config.handshakeData != nil { + nonce = []byte(config.handshakeData["key"]) + } + bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n") + bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n") + + if config.Version != ProtocolVersionHybi13 { + return ErrBadProtocolVersion + } + + bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n") + if len(config.Protocol) > 0 { + bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + err = config.Header.WriteSubset(bw, handshakeHeader) + if err != nil { + return err + } + + bw.WriteString("\r\n") + if err = bw.Flush(); err != nil { + return err + } + + resp, err := http.ReadResponse(br, &http.Request{Method: "GET"}) + if err != nil { + return err + } + if resp.StatusCode != 101 { + return ErrBadStatus + } + if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" || + strings.ToLower(resp.Header.Get("Connection")) != "upgrade" { + return ErrBadUpgrade + } + expectedAccept, err := getNonceAccept(nonce) + if err != nil { + return err + } + if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) { + return ErrChallengeResponse + } + if resp.Header.Get("Sec-WebSocket-Extensions") != "" { + return ErrUnsupportedExtensions + } + offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol") + if offeredProtocol != "" { + protocolMatched := false + for i := 0; i < len(config.Protocol); i++ { + if config.Protocol[i] == offeredProtocol { + protocolMatched = true + break + } + } + if !protocolMatched { + return ErrBadWebSocketProtocol + } + config.Protocol = []string{offeredProtocol} + } + + return nil +} + +// newHybiClientConn creates a client WebSocket connection after handshake. +func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn { + return newHybiConn(config, buf, rwc, nil) +} + +// A HybiServerHandshaker performs a server handshake using hybi draft protocol. +type hybiServerHandshaker struct { + *Config + accept []byte +} + +func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) { + c.Version = ProtocolVersionHybi13 + if req.Method != "GET" { + return http.StatusMethodNotAllowed, ErrBadRequestMethod + } + // HTTP version can be safely ignored. 
+ + if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" || + !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") { + return http.StatusBadRequest, ErrNotWebSocket + } + + key := req.Header.Get("Sec-Websocket-Key") + if key == "" { + return http.StatusBadRequest, ErrChallengeResponse + } + version := req.Header.Get("Sec-Websocket-Version") + switch version { + case "13": + c.Version = ProtocolVersionHybi13 + default: + return http.StatusBadRequest, ErrBadWebSocketVersion + } + var scheme string + if req.TLS != nil { + scheme = "wss" + } else { + scheme = "ws" + } + c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI()) + if err != nil { + return http.StatusBadRequest, err + } + protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol")) + if protocol != "" { + protocols := strings.Split(protocol, ",") + for i := 0; i < len(protocols); i++ { + c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i])) + } + } + c.accept, err = getNonceAccept([]byte(key)) + if err != nil { + return http.StatusInternalServerError, err + } + return http.StatusSwitchingProtocols, nil +} + +// Origin parses the Origin header in req. +// If the Origin header is not set, it returns nil and nil. +func Origin(config *Config, req *http.Request) (*url.URL, error) { + var origin string + switch config.Version { + case ProtocolVersionHybi13: + origin = req.Header.Get("Origin") + } + if origin == "" { + return nil, nil + } + return url.ParseRequestURI(origin) +} + +func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) { + if len(c.Protocol) > 0 { + if len(c.Protocol) != 1 { + // You need choose a Protocol in Handshake func in Server. + return ErrBadWebSocketProtocol + } + } + buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") + buf.WriteString("Upgrade: websocket\r\n") + buf.WriteString("Connection: Upgrade\r\n") + buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") + if len(c.Protocol) > 0 { + buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + if c.Header != nil { + err := c.Header.WriteSubset(buf, handshakeHeader) + if err != nil { + return err + } + } + buf.WriteString("\r\n") + return buf.Flush() +} + +func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiServerConn(c.Config, buf, rwc, request) +} + +// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. +func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiConn(config, buf, rwc, request) +} diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go new file mode 100644 index 000000000..0895dea19 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/server.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "fmt" + "io" + "net/http" +) + +func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) { + var hs serverHandshaker = &hybiServerHandshaker{Config: config} + code, err := hs.ReadHandshake(buf.Reader, req) + if err == ErrBadWebSocketVersion { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if err != nil { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if handshake != nil { + err = handshake(config, req) + if err != nil { + code = http.StatusForbidden + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + } + err = hs.AcceptHandshake(buf.Writer) + if err != nil { + code = http.StatusBadRequest + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + conn = hs.NewServerConn(buf, rwc, req) + return +} + +// Server represents a server of a WebSocket. +type Server struct { + // Config is a WebSocket configuration for new WebSocket connection. + Config + + // Handshake is an optional function in WebSocket handshake. + // For example, you can check, or don't check Origin header. + // Another example, you can select config.Protocol. + Handshake func(*Config, *http.Request) error + + // Handler handles a WebSocket connection. + Handler +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.serveWebSocket(w, req) +} + +func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) { + rwc, buf, err := w.(http.Hijacker).Hijack() + if err != nil { + panic("Hijack failed: " + err.Error()) + } + // The server should abort the WebSocket connection if it finds + // the client did not send a handshake that matches with protocol + // specification. + defer rwc.Close() + conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake) + if err != nil { + return + } + if conn == nil { + panic("unexpected nil conn") + } + s.Handler(conn) +} + +// Handler is a simple interface to a WebSocket browser client. +// It checks if Origin header is valid URL by default. +// You might want to verify websocket.Conn.Config().Origin in the func. +// If you use Server instead of Handler, you could call websocket.Origin and +// check the origin in your Handshake func. So, if you want to accept +// non-browser clients, which do not send an Origin header, set a +// Server.Handshake that does not check the origin. 
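+//
+// A minimal sketch of serving a Handler; the handler name and route are
+// placeholders:
+//
+//	func echoHandler(ws *websocket.Conn) {
+//		io.Copy(ws, ws)
+//	}
+//
+//	http.Handle("/echo", websocket.Handler(echoHandler))
+//	http.ListenAndServe(":8080", nil)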
+type Handler func(*Conn) + +func checkOrigin(config *Config, req *http.Request) (err error) { + config.Origin, err = Origin(config, req) + if err == nil && config.Origin == nil { + return fmt.Errorf("null origin") + } + return err +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s := Server{Handler: h, Handshake: checkOrigin} + s.serveWebSocket(w, req) +} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go new file mode 100644 index 000000000..e242c89a7 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -0,0 +1,448 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements a client and server for the WebSocket protocol +// as specified in RFC 6455. +// +// This package currently lacks some features found in an alternative +// and more actively maintained WebSocket package: +// +// https://godoc.org/github.com/gorilla/websocket +// +package websocket // import "golang.org/x/net/websocket" + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +const ( + ProtocolVersionHybi13 = 13 + ProtocolVersionHybi = ProtocolVersionHybi13 + SupportedProtocolVersion = "13" + + ContinuationFrame = 0 + TextFrame = 1 + BinaryFrame = 2 + CloseFrame = 8 + PingFrame = 9 + PongFrame = 10 + UnknownFrame = 255 + + DefaultMaxPayloadBytes = 32 << 20 // 32MB +) + +// ProtocolError represents WebSocket protocol errors. +type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrBadProtocolVersion = &ProtocolError{"bad protocol version"} + ErrBadScheme = &ProtocolError{"bad scheme"} + ErrBadStatus = &ProtocolError{"bad status"} + ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"} + ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"} + ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"} + ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"} + ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"} + ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"} + ErrBadFrame = &ProtocolError{"bad frame"} + ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"} + ErrNotWebSocket = &ProtocolError{"not websocket protocol"} + ErrBadRequestMethod = &ProtocolError{"bad method"} + ErrNotSupported = &ProtocolError{"not supported"} +) + +// ErrFrameTooLarge is returned by Codec's Receive method if payload size +// exceeds limit set by Conn.MaxPayloadBytes +var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit") + +// Addr is an implementation of net.Addr for WebSocket. +type Addr struct { + *url.URL +} + +// Network returns the network type for a WebSocket, "websocket". +func (addr *Addr) Network() string { return "websocket" } + +// Config is a WebSocket configuration +type Config struct { + // A WebSocket server address. + Location *url.URL + + // A Websocket client origin. + Origin *url.URL + + // WebSocket subprotocols. + Protocol []string + + // WebSocket protocol version. + Version int + + // TLS config for secure WebSocket (wss). 
+ TlsConfig *tls.Config + + // Additional header fields to be sent in WebSocket opening handshake. + Header http.Header + + // Dialer used when opening websocket connections. + Dialer *net.Dialer + + handshakeData map[string]string +} + +// serverHandshaker is an interface to handle WebSocket server side handshake. +type serverHandshaker interface { + // ReadHandshake reads handshake request message from client. + // Returns http response code and error if any. + ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) + + // AcceptHandshake accepts the client handshake request and sends + // handshake response back to client. + AcceptHandshake(buf *bufio.Writer) (err error) + + // NewServerConn creates a new WebSocket connection. + NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn) +} + +// frameReader is an interface to read a WebSocket frame. +type frameReader interface { + // Reader is to read payload of the frame. + io.Reader + + // PayloadType returns payload type. + PayloadType() byte + + // HeaderReader returns a reader to read header of the frame. + HeaderReader() io.Reader + + // TrailerReader returns a reader to read trailer of the frame. + // If it returns nil, there is no trailer in the frame. + TrailerReader() io.Reader + + // Len returns total length of the frame, including header and trailer. + Len() int +} + +// frameReaderFactory is an interface to creates new frame reader. +type frameReaderFactory interface { + NewFrameReader() (r frameReader, err error) +} + +// frameWriter is an interface to write a WebSocket frame. +type frameWriter interface { + // Writer is to write payload of the frame. + io.WriteCloser +} + +// frameWriterFactory is an interface to create new frame writer. +type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int + + // MaxPayloadBytes limits the size of frame payload received over Conn + // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used. + MaxPayloadBytes int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. 
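+//
+// A rough sketch of a single raw read; the buffer size is arbitrary:
+//
+//	buf := make([]byte, 512)
+//	n, err := ws.Read(buf)
+//	if err != nil {
+//		// io.EOF is returned once a close frame has been received
+//	}
+//	payload := buf[:n]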
+func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +func (ws *Conn) IsClientConn() bool { return ws.request == nil } +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. +func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. +func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. +func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. +type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. 
+func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores +// in v. The whole frame payload is read to an in-memory buffer; max size of +// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds +// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire +// completely. The next call to Receive would read and discard leftover data of +// previous oversized frame before processing next frame. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + maxPayloadBytes := ws.MaxPayloadBytes + if maxPayloadBytes == 0 { + maxPayloadBytes = DefaultMaxPayloadBytes + } + if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) { + // payload size exceeds limit, no need to call Unmarshal + // + // set frameReader to current oversized frame so that + // the next call to this function can drain leftover + // data before processing the next frame + ws.frameReader = frame + return ErrFrameTooLarge + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. + +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. 
+ +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/text/cases/cases.go b/vendor/golang.org/x/text/cases/cases.go new file mode 100644 index 000000000..736c0c312 --- /dev/null +++ b/vendor/golang.org/x/text/cases/cases.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_trieval.go + +// Package cases provides general and language-specific case mappers. +package cases // import "golang.org/x/text/cases" + +import ( + "golang.org/x/text/language" + "golang.org/x/text/transform" +) + +// References: +// - Unicode Reference Manual Chapter 3.13, 4.2, and 5.18. +// - http://www.unicode.org/reports/tr29/ +// - http://www.unicode.org/Public/6.3.0/ucd/CaseFolding.txt +// - http://www.unicode.org/Public/6.3.0/ucd/SpecialCasing.txt +// - http://www.unicode.org/Public/6.3.0/ucd/DerivedCoreProperties.txt +// - http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakProperty.txt +// - http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakTest.txt +// - http://userguide.icu-project.org/transforms/casemappings + +// TODO: +// - Case folding +// - Wide and Narrow? +// - Segmenter option for title casing. +// - ASCII fast paths +// - Encode Soft-Dotted property within trie somehow. + +// A Caser transforms given input to a certain case. It implements +// transform.Transformer. +// +// A Caser may be stateful and should therefore not be shared between +// goroutines. +type Caser struct { + t transform.SpanningTransformer +} + +// Bytes returns a new byte slice with the result of converting b to the case +// form implemented by c. +func (c Caser) Bytes(b []byte) []byte { + b, _, _ = transform.Bytes(c.t, b) + return b +} + +// String returns a string with the result of transforming s to the case form +// implemented by c. +func (c Caser) String(s string) string { + s, _, _ = transform.String(c.t, s) + return s +} + +// Reset resets the Caser to be reused for new input after a previous call to +// Transform. +func (c Caser) Reset() { c.t.Reset() } + +// Transform implements the transform.Transformer interface and transforms the +// given input to the case form implemented by c. +func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return c.t.Transform(dst, src, atEOF) +} + +// Span implements the transform.SpanningTransformer interface. +func (c Caser) Span(src []byte, atEOF bool) (n int, err error) { + return c.t.Span(src, atEOF) +} + +// Upper returns a Caser for language-specific uppercasing. +func Upper(t language.Tag, opts ...Option) Caser { + return Caser{makeUpper(t, getOpts(opts...))} +} + +// Lower returns a Caser for language-specific lowercasing. +func Lower(t language.Tag, opts ...Option) Caser { + return Caser{makeLower(t, getOpts(opts...))} +} + +// Title returns a Caser for language-specific title casing. It uses an +// approximation of the default Unicode Word Break algorithm. +func Title(t language.Tag, opts ...Option) Caser { + return Caser{makeTitle(t, getOpts(opts...))} +} + +// Fold returns a Caser that implements Unicode case folding. The returned Caser +// is stateless and safe to use concurrently by multiple goroutines. 
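For the Caser API introduced above, a small usage sketch with the vendored golang.org/x/text/cases and golang.org/x/text/language packages; the inputs and expected outputs are illustrative only:

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	// Casers are cheap to construct but may be stateful, so per the doc
	// above they should not be shared between goroutines.
	upper := cases.Upper(language.Und)
	title := cases.Title(language.English)

	fmt.Println(upper.String("straße"))             // expected: STRASSE (ß uppercases to SS)
	fmt.Println(title.String("go vendoring update")) // expected: Go Vendoring Update
}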
+// +// Case folding does not normalize the input and may not preserve a normal form. +// Use the collate or search package for more convenient and linguistically +// sound comparisons. Use golang.org/x/text/secure/precis for string comparisons +// where security aspects are a concern. +func Fold(opts ...Option) Caser { + return Caser{makeFold(getOpts(opts...))} +} + +// An Option is used to modify the behavior of a Caser. +type Option func(o options) options + +// TODO: consider these options to take a boolean as well, like FinalSigma. +// The advantage of using this approach is that other providers of a lower-case +// algorithm could set different defaults by prefixing a user-provided slice +// of options with their own. This is handy, for instance, for the precis +// package which would override the default to not handle the Greek final sigma. + +var ( + // NoLower disables the lowercasing of non-leading letters for a title + // caser. + NoLower Option = noLower + + // Compact omits mappings in case folding for characters that would grow the + // input. (Unimplemented.) + Compact Option = compact +) + +// TODO: option to preserve a normal form, if applicable? + +type options struct { + noLower bool + simple bool + + // TODO: segmenter, max ignorable, alternative versions, etc. + + ignoreFinalSigma bool +} + +func getOpts(o ...Option) (res options) { + for _, f := range o { + res = f(res) + } + return +} + +func noLower(o options) options { + o.noLower = true + return o +} + +func compact(o options) options { + o.simple = true + return o +} + +// HandleFinalSigma specifies whether the special handling of Greek final sigma +// should be enabled. Unicode prescribes handling the Greek final sigma for all +// locales, but standards like IDNA and PRECIS override this default. +func HandleFinalSigma(enable bool) Option { + if enable { + return handleFinalSigma + } + return ignoreFinalSigma +} + +func ignoreFinalSigma(o options) options { + o.ignoreFinalSigma = true + return o +} + +func handleFinalSigma(o options) options { + o.ignoreFinalSigma = false + return o +} diff --git a/vendor/golang.org/x/text/cases/context.go b/vendor/golang.org/x/text/cases/context.go new file mode 100644 index 000000000..e9aa9e193 --- /dev/null +++ b/vendor/golang.org/x/text/cases/context.go @@ -0,0 +1,376 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +import "golang.org/x/text/transform" + +// A context is used for iterating over source bytes, fetching case info and +// writing to a destination buffer. +// +// Casing operations may need more than one rune of context to decide how a rune +// should be cased. Casing implementations should call checkpoint on context +// whenever it is known to be safe to return the runes processed so far. +// +// It is recommended for implementations to not allow for more than 30 case +// ignorables as lookahead (analogous to the limit in norm) and to use state if +// unbounded lookahead is needed for cased runes. +type context struct { + dst, src []byte + atEOF bool + + pDst int // pDst points past the last written rune in dst. + pSrc int // pSrc points to the start of the currently scanned rune. + + // checkpoints safe to return in Transform, where nDst <= pDst and nSrc <= pSrc. 
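The HandleFinalSigma option defined above is easiest to see by contrast with the default lowercaser. A short sketch; the expected outputs in the comments follow the Unicode final-sigma rule and are illustrative:

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	word := "ΟΔΟΣ"

	// Default: the trailing Σ becomes the final form ς.
	fmt.Println(cases.Lower(language.Und).String(word))
	// expected: "οδος" (last letter is ς)

	// IDNA/PRECIS-style: disable the special case, every Σ becomes σ.
	fmt.Println(cases.Lower(language.Und, cases.HandleFinalSigma(false)).String(word))
	// expected: "οδοσ"
}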
+ nDst, nSrc int + err error + + sz int // size of current rune + info info // case information of currently scanned rune + + // State preserved across calls to Transform. + isMidWord bool // false if next cased letter needs to be title-cased. +} + +func (c *context) Reset() { + c.isMidWord = false +} + +// ret returns the return values for the Transform method. It checks whether +// there were insufficient bytes in src to complete and introduces an error +// accordingly, if necessary. +func (c *context) ret() (nDst, nSrc int, err error) { + if c.err != nil || c.nSrc == len(c.src) { + return c.nDst, c.nSrc, c.err + } + // This point is only reached by mappers if there was no short destination + // buffer. This means that the source buffer was exhausted and that c.sz was + // set to 0 by next. + if c.atEOF && c.pSrc == len(c.src) { + return c.pDst, c.pSrc, nil + } + return c.nDst, c.nSrc, transform.ErrShortSrc +} + +// retSpan returns the return values for the Span method. It checks whether +// there were insufficient bytes in src to complete and introduces an error +// accordingly, if necessary. +func (c *context) retSpan() (n int, err error) { + _, nSrc, err := c.ret() + return nSrc, err +} + +// checkpoint sets the return value buffer points for Transform to the current +// positions. +func (c *context) checkpoint() { + if c.err == nil { + c.nDst, c.nSrc = c.pDst, c.pSrc+c.sz + } +} + +// unreadRune causes the last rune read by next to be reread on the next +// invocation of next. Only one unreadRune may be called after a call to next. +func (c *context) unreadRune() { + c.sz = 0 +} + +func (c *context) next() bool { + c.pSrc += c.sz + if c.pSrc == len(c.src) || c.err != nil { + c.info, c.sz = 0, 0 + return false + } + v, sz := trie.lookup(c.src[c.pSrc:]) + c.info, c.sz = info(v), sz + if c.sz == 0 { + if c.atEOF { + // A zero size means we have an incomplete rune. If we are atEOF, + // this means it is an illegal rune, which we will consume one + // byte at a time. + c.sz = 1 + } else { + c.err = transform.ErrShortSrc + return false + } + } + return true +} + +// writeBytes adds bytes to dst. +func (c *context) writeBytes(b []byte) bool { + if len(c.dst)-c.pDst < len(b) { + c.err = transform.ErrShortDst + return false + } + // This loop is faster than using copy. + for _, ch := range b { + c.dst[c.pDst] = ch + c.pDst++ + } + return true +} + +// writeString writes the given string to dst. +func (c *context) writeString(s string) bool { + if len(c.dst)-c.pDst < len(s) { + c.err = transform.ErrShortDst + return false + } + // This loop is faster than using copy. + for i := 0; i < len(s); i++ { + c.dst[c.pDst] = s[i] + c.pDst++ + } + return true +} + +// copy writes the current rune to dst. +func (c *context) copy() bool { + return c.writeBytes(c.src[c.pSrc : c.pSrc+c.sz]) +} + +// copyXOR copies the current rune to dst and modifies it by applying the XOR +// pattern of the case info. It is the responsibility of the caller to ensure +// that this is a rune with a XOR pattern defined. +func (c *context) copyXOR() bool { + if !c.copy() { + return false + } + if c.info&xorIndexBit == 0 { + // Fast path for 6-bit XOR pattern, which covers most cases. + c.dst[c.pDst-1] ^= byte(c.info >> xorShift) + } else { + // Interpret XOR bits as an index. + // TODO: test performance for unrolling this loop. Verify that we have + // at least two bytes and at most three. 
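copyXOR above relies on the fact that most case pairs differ only by a small XOR pattern in the trailing byte(s) of their UTF-8 encodings. A standalone illustration of that general idea, not of the package's actual xorData encoding:

package main

import "fmt"

func main() {
	// ASCII: the case pair differs only in bit 0x20 of the single byte.
	fmt.Printf("%c %c\n", 'A'^0x20, 'z'^0x20) // a Z

	// Many non-ASCII pairs also differ only in the last UTF-8 byte:
	// 'À' is 0xC3 0x80 and 'à' is 0xC3 0xA0, again a 0x20 difference,
	// so the trie can often store a tiny XOR pattern instead of a
	// full replacement string.
	b := []byte("À")
	b[len(b)-1] ^= 0x20
	fmt.Println(string(b)) // à
}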
+ idx := c.info >> xorShift + for p := c.pDst - 1; ; p-- { + c.dst[p] ^= xorData[idx] + idx-- + if xorData[idx] == 0 { + break + } + } + } + return true +} + +// hasPrefix returns true if src[pSrc:] starts with the given string. +func (c *context) hasPrefix(s string) bool { + b := c.src[c.pSrc:] + if len(b) < len(s) { + return false + } + for i, c := range b[:len(s)] { + if c != s[i] { + return false + } + } + return true +} + +// caseType returns an info with only the case bits, normalized to either +// cLower, cUpper, cTitle or cUncased. +func (c *context) caseType() info { + cm := c.info & 0x7 + if cm < 4 { + return cm + } + if cm >= cXORCase { + // xor the last bit of the rune with the case type bits. + b := c.src[c.pSrc+c.sz-1] + return info(b&1) ^ cm&0x3 + } + if cm == cIgnorableCased { + return cLower + } + return cUncased +} + +// lower writes the lowercase version of the current rune to dst. +func lower(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cLower { + return c.copy() + } + if c.info&exceptionBit == 0 { + return c.copyXOR() + } + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange { + return c.writeString(e[offset : offset+nLower]) + } + return c.copy() +} + +func isLower(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cLower { + return true + } + if c.info&exceptionBit == 0 { + c.err = transform.ErrEndOfSpan + return false + } + e := exceptions[c.info>>exceptionShift:] + if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// upper writes the uppercase version of the current rune to dst. +func upper(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cUpper { + return c.copy() + } + if c.info&exceptionBit == 0 { + return c.copyXOR() + } + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + // Get length of first special case mapping. + n := (e[1] >> lengthBits) & lengthMask + if ct == cTitle { + // The first special case mapping is for lower. Set n to the second. + if n == noChange { + n = 0 + } + n, e = e[1]&lengthMask, e[n:] + } + if n != noChange { + return c.writeString(e[offset : offset+n]) + } + return c.copy() +} + +// isUpper writes the isUppercase version of the current rune to dst. +func isUpper(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cUpper { + return true + } + if c.info&exceptionBit == 0 { + c.err = transform.ErrEndOfSpan + return false + } + e := exceptions[c.info>>exceptionShift:] + // Get length of first special case mapping. + n := (e[1] >> lengthBits) & lengthMask + if ct == cTitle { + n = e[1] & lengthMask + } + if n != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// title writes the title case version of the current rune to dst. +func title(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cTitle { + return c.copy() + } + if c.info&exceptionBit == 0 { + if ct == cLower { + return c.copyXOR() + } + return c.copy() + } + // Get the exception data. 
+ e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + + nFirst := (e[1] >> lengthBits) & lengthMask + if nTitle := e[1] & lengthMask; nTitle != noChange { + if nFirst != noChange { + e = e[nFirst:] + } + return c.writeString(e[offset : offset+nTitle]) + } + if ct == cLower && nFirst != noChange { + // Use the uppercase version instead. + return c.writeString(e[offset : offset+nFirst]) + } + // Already in correct case. + return c.copy() +} + +// isTitle reports whether the current rune is in title case. +func isTitle(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cTitle { + return true + } + if c.info&exceptionBit == 0 { + if ct == cLower { + c.err = transform.ErrEndOfSpan + return false + } + return true + } + // Get the exception data. + e := exceptions[c.info>>exceptionShift:] + if nTitle := e[1] & lengthMask; nTitle != noChange { + c.err = transform.ErrEndOfSpan + return false + } + nFirst := (e[1] >> lengthBits) & lengthMask + if ct == cLower && nFirst != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// foldFull writes the foldFull version of the current rune to dst. +func foldFull(c *context) bool { + if c.info&hasMappingMask == 0 { + return c.copy() + } + ct := c.caseType() + if c.info&exceptionBit == 0 { + if ct != cLower || c.info&inverseFoldBit != 0 { + return c.copyXOR() + } + return c.copy() + } + e := exceptions[c.info>>exceptionShift:] + n := e[0] & lengthMask + if n == 0 { + if ct == cLower { + return c.copy() + } + n = (e[1] >> lengthBits) & lengthMask + } + return c.writeString(e[2 : 2+n]) +} + +// isFoldFull reports whether the current run is mapped to foldFull +func isFoldFull(c *context) bool { + if c.info&hasMappingMask == 0 { + return true + } + ct := c.caseType() + if c.info&exceptionBit == 0 { + if ct != cLower || c.info&inverseFoldBit != 0 { + c.err = transform.ErrEndOfSpan + return false + } + return true + } + e := exceptions[c.info>>exceptionShift:] + n := e[0] & lengthMask + if n == 0 && ct == cLower { + return true + } + c.err = transform.ErrEndOfSpan + return false +} diff --git a/vendor/golang.org/x/text/cases/fold.go b/vendor/golang.org/x/text/cases/fold.go new file mode 100644 index 000000000..85cc434fa --- /dev/null +++ b/vendor/golang.org/x/text/cases/fold.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +import "golang.org/x/text/transform" + +type caseFolder struct{ transform.NopResetter } + +// caseFolder implements the Transformer interface for doing case folding. +func (t *caseFolder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() { + foldFull(&c) + c.checkpoint() + } + return c.ret() +} + +func (t *caseFolder) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isFoldFull(&c) { + c.checkpoint() + } + return c.retSpan() +} + +func makeFold(o options) transform.SpanningTransformer { + // TODO: Special case folding, through option Language, Special/Turkic, or + // both. + // TODO: Implement Compact options. 
+ return &caseFolder{} +} diff --git a/vendor/golang.org/x/text/cases/icu.go b/vendor/golang.org/x/text/cases/icu.go new file mode 100644 index 000000000..46530d1e4 --- /dev/null +++ b/vendor/golang.org/x/text/cases/icu.go @@ -0,0 +1,61 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build icu + +package cases + +// Ideally these functions would be defined in a test file, but go test doesn't +// allow CGO in tests. The build tag should ensure either way that these +// functions will not end up in the package. + +// TODO: Ensure that the correct ICU version is set. + +/* +#cgo LDFLAGS: -licui18n.57 -licuuc.57 +#include +#include +#include +#include +#include +*/ +import "C" + +import "unsafe" + +func doICU(tag, caser, input string) string { + err := C.UErrorCode(0) + loc := C.CString(tag) + cm := C.ucasemap_open(loc, C.uint32_t(0), &err) + + buf := make([]byte, len(input)*4) + dst := (*C.char)(unsafe.Pointer(&buf[0])) + src := C.CString(input) + + cn := C.int32_t(0) + + switch caser { + case "fold": + cn = C.ucasemap_utf8FoldCase(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "lower": + cn = C.ucasemap_utf8ToLower(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "upper": + cn = C.ucasemap_utf8ToUpper(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "title": + cn = C.ucasemap_utf8ToTitle(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + } + return string(buf[:cn]) +} diff --git a/vendor/golang.org/x/text/cases/info.go b/vendor/golang.org/x/text/cases/info.go new file mode 100644 index 000000000..3b51f03d6 --- /dev/null +++ b/vendor/golang.org/x/text/cases/info.go @@ -0,0 +1,82 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +func (c info) cccVal() info { + if c&exceptionBit != 0 { + return info(exceptions[c>>exceptionShift]) & cccMask + } + return c & cccMask +} + +func (c info) cccType() info { + ccc := c.cccVal() + if ccc <= cccZero { + return cccZero + } + return ccc +} + +// TODO: Implement full Unicode breaking algorithm: +// 1) Implement breaking in separate package. +// 2) Use the breaker here. +// 3) Compare table size and performance of using the more generic breaker. +// +// Note that we can extend the current algorithm to be much more accurate. This +// only makes sense, though, if the performance and/or space penalty of using +// the generic breaker is big. Extra data will only be needed for non-cased +// runes, which means there are sufficient bits left in the caseType. +// ICU prohibits breaking in such cases as well. + +// For the purpose of title casing we use an approximation of the Unicode Word +// Breaking algorithm defined in Annex #29: +// http://www.unicode.org/reports/tr29/#Default_Grapheme_Cluster_Table. +// +// For our approximation, we group the Word Break types into the following +// categories, with associated rules: +// +// 1) Letter: +// ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend, Format_FE, ZWJ. +// Rule: Never break between consecutive runes of this category. +// +// 2) Mid: +// MidLetter, MidNumLet, Single_Quote. +// (Cf. case-ignorable: MidLetter, MidNumLet, Single_Quote or cat is Mn, +// Me, Cf, Lm or Sk). +// Rule: Don't break between Letter and Mid, but break between two Mids. 
+// +// 3) Break: +// Any other category: NewLine, MidNum, CR, LF, Double_Quote, Katakana, and +// Other. +// These categories should always result in a break between two cased letters. +// Rule: Always break. +// +// Note 1: the Katakana and MidNum categories can, in esoteric cases, result in +// preventing a break between two cased letters. For now we will ignore this +// (e.g. [ALetter] [ExtendNumLet] [Katakana] [ExtendNumLet] [ALetter] and +// [ALetter] [Numeric] [MidNum] [Numeric] [ALetter].) +// +// Note 2: the rule for Mid is very approximate, but works in most cases. To +// improve, we could store the categories in the trie value and use a FA to +// manage breaks. See TODO comment above. +// +// Note 3: according to the spec, it is possible for the Extend category to +// introduce breaks between other categories grouped in Letter. However, this +// is undesirable for our purposes. ICU prevents breaks in such cases as well. + +// isBreak returns whether this rune should introduce a break. +func (c info) isBreak() bool { + return c.cccVal() == cccBreak +} + +// isLetter returns whether the rune is of break type ALetter, Hebrew_Letter, +// Numeric, ExtendNumLet, or Extend. +func (c info) isLetter() bool { + ccc := c.cccVal() + if ccc == cccZero { + return !c.isCaseIgnorable() + } + return ccc != cccBreak +} diff --git a/vendor/golang.org/x/text/cases/map.go b/vendor/golang.org/x/text/cases/map.go new file mode 100644 index 000000000..4baebaaa6 --- /dev/null +++ b/vendor/golang.org/x/text/cases/map.go @@ -0,0 +1,816 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +// This file contains the definitions of case mappings for all supported +// languages. The rules for the language-specific tailorings were taken and +// modified from the CLDR transform definitions in common/transforms. + +import ( + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/text/internal" + "golang.org/x/text/language" + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// A mapFunc takes a context set to the current rune and writes the mapped +// version to the same context. It may advance the context to the next rune. It +// returns whether a checkpoint is possible: whether the pDst bytes written to +// dst so far won't need changing as we see more source bytes. +type mapFunc func(*context) bool + +// A spanFunc takes a context set to the current rune and returns whether this +// rune would be altered when written to the output. It may advance the context +// to the next rune. It returns whether a checkpoint is possible. +type spanFunc func(*context) bool + +// maxIgnorable defines the maximum number of ignorables to consider for +// lookahead operations. +const maxIgnorable = 30 + +// supported lists the language tags for which we have tailorings. +const supported = "und af az el lt nl tr" + +func init() { + tags := []language.Tag{} + for _, s := range strings.Split(supported, " ") { + tags = append(tags, language.MustParse(s)) + } + matcher = internal.NewInheritanceMatcher(tags) + Supported = language.NewCoverage(tags) +} + +var ( + matcher *internal.InheritanceMatcher + + Supported language.Coverage + + // We keep the following lists separate, instead of having a single per- + // language struct, to give the compiler a chance to remove unused code. 
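The Letter/Mid/Break grouping above is what keeps a word-internal apostrophe from starting a new title-cased word. A small sketch; the expected output is illustrative:

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	t := cases.Title(language.Und)
	// The apostrophe falls in the Mid category, so no word break is
	// introduced inside "don't" and the trailing t stays lowercase.
	fmt.Println(t.String("don't panic")) // expected: Don't Panic
}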
+ + // Some uppercase mappers are stateless, so we can precompute the + // Transformers and save a bit on runtime allocations. + upperFunc = []struct { + upper mapFunc + span spanFunc + }{ + {nil, nil}, // und + {nil, nil}, // af + {aztrUpper(upper), isUpper}, // az + {elUpper, noSpan}, // el + {ltUpper(upper), noSpan}, // lt + {nil, nil}, // nl + {aztrUpper(upper), isUpper}, // tr + } + + undUpper transform.SpanningTransformer = &undUpperCaser{} + undLower transform.SpanningTransformer = &undLowerCaser{} + undLowerIgnoreSigma transform.SpanningTransformer = &undLowerIgnoreSigmaCaser{} + + lowerFunc = []mapFunc{ + nil, // und + nil, // af + aztrLower, // az + nil, // el + ltLower, // lt + nil, // nl + aztrLower, // tr + } + + titleInfos = []struct { + title mapFunc + lower mapFunc + titleSpan spanFunc + rewrite func(*context) + }{ + {title, lower, isTitle, nil}, // und + {title, lower, isTitle, afnlRewrite}, // af + {aztrUpper(title), aztrLower, isTitle, nil}, // az + {title, lower, isTitle, nil}, // el + {ltUpper(title), ltLower, noSpan, nil}, // lt + {nlTitle, lower, nlTitleSpan, afnlRewrite}, // nl + {aztrUpper(title), aztrLower, isTitle, nil}, // tr + } +) + +func makeUpper(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + f := upperFunc[i].upper + if f == nil { + return undUpper + } + return &simpleCaser{f: f, span: upperFunc[i].span} +} + +func makeLower(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + f := lowerFunc[i] + if f == nil { + if o.ignoreFinalSigma { + return undLowerIgnoreSigma + } + return undLower + } + if o.ignoreFinalSigma { + return &simpleCaser{f: f, span: isLower} + } + return &lowerCaser{ + first: f, + midWord: finalSigma(f), + } +} + +func makeTitle(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + x := &titleInfos[i] + lower := x.lower + if o.noLower { + lower = (*context).copy + } else if !o.ignoreFinalSigma { + lower = finalSigma(lower) + } + return &titleCaser{ + title: x.title, + lower: lower, + titleSpan: x.titleSpan, + rewrite: x.rewrite, + } +} + +func noSpan(c *context) bool { + c.err = transform.ErrEndOfSpan + return false +} + +// TODO: consider a similar special case for the fast majority lower case. This +// is a bit more involved so will require some more precise benchmarking to +// justify it. + +type undUpperCaser struct{ transform.NopResetter } + +// undUpperCaser implements the Transformer interface for doing an upper case +// mapping for the root locale (und). It eliminates the need for an allocation +// as it prevents escaping by not using function pointers. +func (t undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() { + upper(&c) + c.checkpoint() + } + return c.ret() +} + +func (t undUpperCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isUpper(&c) { + c.checkpoint() + } + return c.retSpan() +} + +// undLowerIgnoreSigmaCaser implements the Transformer interface for doing +// a lower case mapping for the root locale (und) ignoring final sigma +// handling. This casing algorithm is used in some performance-critical packages +// like secure/precis and x/net/http/idna, which warrants its special-casing. 
+type undLowerIgnoreSigmaCaser struct{ transform.NopResetter } + +func (t undLowerIgnoreSigmaCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() && lower(&c) { + c.checkpoint() + } + return c.ret() + +} + +// Span implements a generic lower-casing. This is possible as isLower works +// for all lowercasing variants. All lowercase variants only vary in how they +// transform a non-lowercase letter. They will never change an already lowercase +// letter. In addition, there is no state. +func (t undLowerIgnoreSigmaCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isLower(&c) { + c.checkpoint() + } + return c.retSpan() +} + +type simpleCaser struct { + context + f mapFunc + span spanFunc +} + +// simpleCaser implements the Transformer interface for doing a case operation +// on a rune-by-rune basis. +func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() && t.f(&c) { + c.checkpoint() + } + return c.ret() +} + +func (t *simpleCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && t.span(&c) { + c.checkpoint() + } + return c.retSpan() +} + +// undLowerCaser implements the Transformer interface for doing a lower case +// mapping for the root locale (und) ignoring final sigma handling. This casing +// algorithm is used in some performance-critical packages like secure/precis +// and x/net/http/idna, which warrants its special-casing. +type undLowerCaser struct{ transform.NopResetter } + +func (t undLowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + + for isInterWord := true; c.next(); { + if isInterWord { + if c.info.isCased() { + if !lower(&c) { + break + } + isInterWord = false + } else if !c.copy() { + break + } + } else { + if c.info.isNotCasedAndNotCaseIgnorable() { + if !c.copy() { + break + } + isInterWord = true + } else if !c.hasPrefix("Σ") { + if !lower(&c) { + break + } + } else if !finalSigmaBody(&c) { + break + } + } + c.checkpoint() + } + return c.ret() +} + +func (t undLowerCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isLower(&c) { + c.checkpoint() + } + return c.retSpan() +} + +// lowerCaser implements the Transformer interface. The default Unicode lower +// casing requires different treatment for the first and subsequent characters +// of a word, most notably to handle the Greek final Sigma. +type lowerCaser struct { + undLowerIgnoreSigmaCaser + + context + + first, midWord mapFunc +} + +func (t *lowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + t.context = context{dst: dst, src: src, atEOF: atEOF} + c := &t.context + + for isInterWord := true; c.next(); { + if isInterWord { + if c.info.isCased() { + if !t.first(c) { + break + } + isInterWord = false + } else if !c.copy() { + break + } + } else { + if c.info.isNotCasedAndNotCaseIgnorable() { + if !c.copy() { + break + } + isInterWord = true + } else if !t.midWord(c) { + break + } + } + c.checkpoint() + } + return c.ret() +} + +// titleCaser implements the Transformer interface. Title casing algorithms +// distinguish between the first letter of a word and subsequent letters of the +// same word. 
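makeUpper, makeLower and makeTitle above dispatch on the small `supported` list through the inheritance matcher, so unlisted languages inherit the root (und) behavior while listed ones get their tailoring. A sketch contrasting the two; expected outputs are illustrative:

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	word := "istanbul"

	// French is not in the supported list, so it falls back to the root mapping.
	fmt.Println(cases.Upper(language.French).String(word)) // expected: ISTANBUL

	// Turkish has a tailoring: i uppercases to İ (dotted capital I).
	fmt.Println(cases.Upper(language.Turkish).String(word)) // expected: İSTANBUL
}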
It uses state to avoid requiring a potentially infinite lookahead. +type titleCaser struct { + context + + // rune mappings used by the actual casing algorithms. + title mapFunc + lower mapFunc + titleSpan spanFunc + + rewrite func(*context) +} + +// Transform implements the standard Unicode title case algorithm as defined in +// Chapter 3 of The Unicode Standard: +// toTitlecase(X): Find the word boundaries in X according to Unicode Standard +// Annex #29, "Unicode Text Segmentation." For each word boundary, find the +// first cased character F following the word boundary. If F exists, map F to +// Titlecase_Mapping(F); then map all characters C between F and the following +// word boundary to Lowercase_Mapping(C). +func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + t.context = context{dst: dst, src: src, atEOF: atEOF, isMidWord: t.isMidWord} + c := &t.context + + if !c.next() { + return c.ret() + } + + for { + p := c.info + if t.rewrite != nil { + t.rewrite(c) + } + + wasMid := p.isMid() + // Break out of this loop on failure to ensure we do not modify the + // state incorrectly. + if p.isCased() { + if !c.isMidWord { + if !t.title(c) { + break + } + c.isMidWord = true + } else if !t.lower(c) { + break + } + } else if !c.copy() { + break + } else if p.isBreak() { + c.isMidWord = false + } + + // As we save the state of the transformer, it is safe to call + // checkpoint after any successful write. + if !(c.isMidWord && wasMid) { + c.checkpoint() + } + + if !c.next() { + break + } + if wasMid && c.info.isMid() { + c.isMidWord = false + } + } + return c.ret() +} + +func (t *titleCaser) Span(src []byte, atEOF bool) (n int, err error) { + t.context = context{src: src, atEOF: atEOF, isMidWord: t.isMidWord} + c := &t.context + + if !c.next() { + return c.retSpan() + } + + for { + p := c.info + if t.rewrite != nil { + t.rewrite(c) + } + + wasMid := p.isMid() + // Break out of this loop on failure to ensure we do not modify the + // state incorrectly. + if p.isCased() { + if !c.isMidWord { + if !t.titleSpan(c) { + break + } + c.isMidWord = true + } else if !isLower(c) { + break + } + } else if p.isBreak() { + c.isMidWord = false + } + // As we save the state of the transformer, it is safe to call + // checkpoint after any successful write. + if !(c.isMidWord && wasMid) { + c.checkpoint() + } + + if !c.next() { + break + } + if wasMid && c.info.isMid() { + c.isMidWord = false + } + } + return c.retSpan() +} + +// finalSigma adds Greek final Sigma handing to another casing function. It +// determines whether a lowercased sigma should be σ or ς, by looking ahead for +// case-ignorables and a cased letters. +func finalSigma(f mapFunc) mapFunc { + return func(c *context) bool { + if !c.hasPrefix("Σ") { + return f(c) + } + return finalSigmaBody(c) + } +} + +func finalSigmaBody(c *context) bool { + // Current rune must be ∑. + + // ::NFD(); + // # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA + // Σ } [:case-ignorable:]* [:cased:] → σ; + // [:cased:] [:case-ignorable:]* { Σ → ς; + // ::Any-Lower; + // ::NFC(); + + p := c.pDst + c.writeString("ς") + + // TODO: we should do this here, but right now this will never have an + // effect as this is called when the prefix is Sigma, whereas Dutch and + // Afrikaans only test for an apostrophe. + // + // if t.rewrite != nil { + // t.rewrite(c) + // } + + // We need to do one more iteration after maxIgnorable, as a cased + // letter is not an ignorable and may modify the result. 
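The titlecasing algorithm quoted above both title-cases the first cased letter of a word and lowercases the rest, unless NoLower is set. A short sketch; expected outputs are illustrative:

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	s := "LOREM ipsum DOLOR"

	fmt.Println(cases.Title(language.Und).String(s))
	// expected: Lorem Ipsum Dolor

	// With NoLower, non-leading letters are left untouched.
	fmt.Println(cases.Title(language.Und, cases.NoLower).String(s))
	// expected: LOREM Ipsum DOLOR
}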
+ wasMid := false + for i := 0; i < maxIgnorable+1; i++ { + if !c.next() { + return false + } + if !c.info.isCaseIgnorable() { + // All Midword runes are also case ignorable, so we are + // guaranteed to have a letter or word break here. As we are + // unreading the run, there is no need to unset c.isMidWord; + // the title caser will handle this. + if c.info.isCased() { + // p+1 is guaranteed to be in bounds: if writing ς was + // successful, p+1 will contain the second byte of ς. If not, + // this function will have returned after c.next returned false. + c.dst[p+1]++ // ς → σ + } + c.unreadRune() + return true + } + // A case ignorable may also introduce a word break, so we may need + // to continue searching even after detecting a break. + isMid := c.info.isMid() + if (wasMid && isMid) || c.info.isBreak() { + c.isMidWord = false + } + wasMid = isMid + c.copy() + } + return true +} + +// finalSigmaSpan would be the same as isLower. + +// elUpper implements Greek upper casing, which entails removing a predefined +// set of non-blocked modifiers. Note that these accents should not be removed +// for title casing! +// Example: "Οδός" -> "ΟΔΟΣ". +func elUpper(c *context) bool { + // From CLDR: + // [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Above:]]*? { [\u0313\u0314\u0301\u0300\u0306\u0342\u0308\u0304] → ; + // [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Iota_Subscript:]]*? { \u0345 → ; + + r, _ := utf8.DecodeRune(c.src[c.pSrc:]) + oldPDst := c.pDst + if !upper(c) { + return false + } + if !unicode.Is(unicode.Greek, r) { + return true + } + i := 0 + // Take the properties of the uppercased rune that is already written to the + // destination. This saves us the trouble of having to uppercase the + // decomposed rune again. + if b := norm.NFD.Properties(c.dst[oldPDst:]).Decomposition(); b != nil { + // Restore the destination position and process the decomposed rune. + r, sz := utf8.DecodeRune(b) + if r <= 0xFF { // See A.6.1 + return true + } + c.pDst = oldPDst + // Insert the first rune and ignore the modifiers. See A.6.2. + c.writeBytes(b[:sz]) + i = len(b[sz:]) / 2 // Greek modifiers are always of length 2. + } + + for ; i < maxIgnorable && c.next(); i++ { + switch r, _ := utf8.DecodeRune(c.src[c.pSrc:]); r { + // Above and Iota Subscript + case 0x0300, // U+0300 COMBINING GRAVE ACCENT + 0x0301, // U+0301 COMBINING ACUTE ACCENT + 0x0304, // U+0304 COMBINING MACRON + 0x0306, // U+0306 COMBINING BREVE + 0x0308, // U+0308 COMBINING DIAERESIS + 0x0313, // U+0313 COMBINING COMMA ABOVE + 0x0314, // U+0314 COMBINING REVERSED COMMA ABOVE + 0x0342, // U+0342 COMBINING GREEK PERISPOMENI + 0x0345: // U+0345 COMBINING GREEK YPOGEGRAMMENI + // No-op. Gobble the modifier. + + default: + switch v, _ := trie.lookup(c.src[c.pSrc:]); info(v).cccType() { + case cccZero: + c.unreadRune() + return true + + // We don't need to test for IotaSubscript as the only rune that + // qualifies (U+0345) was already excluded in the switch statement + // above. See A.4. + + case cccAbove: + return c.copy() + default: + // Some other modifier. We're still allowed to gobble Greek + // modifiers after this. + c.copy() + } + } + } + return i == maxIgnorable +} + +// TODO: implement elUpperSpan (low-priority: complex and infrequent). + +func ltLower(c *context) bool { + // From CLDR: + // # Introduce an explicit dot above when lowercasing capital I's and J's + // # whenever there are more accents above. 
+ // # (of the accents used in Lithuanian: grave, acute, tilde above, and ogonek) + // # 0049; 0069 0307; 0049; 0049; lt More_Above; # LATIN CAPITAL LETTER I + // # 004A; 006A 0307; 004A; 004A; lt More_Above; # LATIN CAPITAL LETTER J + // # 012E; 012F 0307; 012E; 012E; lt More_Above; # LATIN CAPITAL LETTER I WITH OGONEK + // # 00CC; 0069 0307 0300; 00CC; 00CC; lt; # LATIN CAPITAL LETTER I WITH GRAVE + // # 00CD; 0069 0307 0301; 00CD; 00CD; lt; # LATIN CAPITAL LETTER I WITH ACUTE + // # 0128; 0069 0307 0303; 0128; 0128; lt; # LATIN CAPITAL LETTER I WITH TILDE + // ::NFD(); + // I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307; + // J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307; + // I \u0328 (Į) } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0328 \u0307; + // I \u0300 (Ì) → i \u0307 \u0300; + // I \u0301 (Í) → i \u0307 \u0301; + // I \u0303 (Ĩ) → i \u0307 \u0303; + // ::Any-Lower(); + // ::NFC(); + + i := 0 + if r := c.src[c.pSrc]; r < utf8.RuneSelf { + lower(c) + if r != 'I' && r != 'J' { + return true + } + } else { + p := norm.NFD.Properties(c.src[c.pSrc:]) + if d := p.Decomposition(); len(d) >= 3 && (d[0] == 'I' || d[0] == 'J') { + // UTF-8 optimization: the decomposition will only have an above + // modifier if the last rune of the decomposition is in [U+300-U+311]. + // In all other cases, a decomposition starting with I is always + // an I followed by modifiers that are not cased themselves. See A.2. + if d[1] == 0xCC && d[2] <= 0x91 { // A.2.4. + if !c.writeBytes(d[:1]) { + return false + } + c.dst[c.pDst-1] += 'a' - 'A' // lower + + // Assumption: modifier never changes on lowercase. See A.1. + // Assumption: all modifiers added have CCC = Above. See A.2.3. + return c.writeString("\u0307") && c.writeBytes(d[1:]) + } + // In all other cases the additional modifiers will have a CCC + // that is less than 230 (Above). We will insert the U+0307, if + // needed, after these modifiers so that a string in FCD form + // will remain so. See A.2.2. + lower(c) + i = 1 + } else { + return lower(c) + } + } + + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccZero: + c.unreadRune() + return true + case cccAbove: + return c.writeString("\u0307") && c.copy() // See A.1. + default: + c.copy() // See A.1. + } + } + return i == maxIgnorable +} + +// ltLowerSpan would be the same as isLower. + +func ltUpper(f mapFunc) mapFunc { + return func(c *context) bool { + // Unicode: + // 0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE + // + // From CLDR: + // # Remove \u0307 following soft-dotteds (i, j, and the like), with possible + // # intervening non-230 marks. + // ::NFD(); + // [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ; + // ::Any-Upper(); + // ::NFC(); + + // TODO: See A.5. A soft-dotted rune never has an exception. This would + // allow us to overload the exception bit and encode this property in + // info. Need to measure performance impact of this. + r, _ := utf8.DecodeRune(c.src[c.pSrc:]) + oldPDst := c.pDst + if !f(c) { + return false + } + if !unicode.Is(unicode.Soft_Dotted, r) { + return true + } + + // We don't need to do an NFD normalization, as a soft-dotted rune never + // contains U+0307. See A.3. + + i := 0 + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccZero: + c.unreadRune() + return true + case cccAbove: + if c.hasPrefix("\u0307") { + // We don't do a full NFC, but rather combine runes for + // some of the common cases. 
(Returning NFC or + // preserving normal form is neither a requirement nor + // a possibility anyway). + if !c.next() { + return false + } + if c.dst[oldPDst] == 'I' && c.pDst == oldPDst+1 && c.src[c.pSrc] == 0xcc { + s := "" + switch c.src[c.pSrc+1] { + case 0x80: // U+0300 COMBINING GRAVE ACCENT + s = "\u00cc" // U+00CC LATIN CAPITAL LETTER I WITH GRAVE + case 0x81: // U+0301 COMBINING ACUTE ACCENT + s = "\u00cd" // U+00CD LATIN CAPITAL LETTER I WITH ACUTE + case 0x83: // U+0303 COMBINING TILDE + s = "\u0128" // U+0128 LATIN CAPITAL LETTER I WITH TILDE + case 0x88: // U+0308 COMBINING DIAERESIS + s = "\u00cf" // U+00CF LATIN CAPITAL LETTER I WITH DIAERESIS + default: + } + if s != "" { + c.pDst = oldPDst + return c.writeString(s) + } + } + } + return c.copy() + default: + c.copy() + } + } + return i == maxIgnorable + } +} + +// TODO: implement ltUpperSpan (low priority: complex and infrequent). + +func aztrUpper(f mapFunc) mapFunc { + return func(c *context) bool { + // i→İ; + if c.src[c.pSrc] == 'i' { + return c.writeString("İ") + } + return f(c) + } +} + +func aztrLower(c *context) (done bool) { + // From CLDR: + // # I and i-dotless; I-dot and i are case pairs in Turkish and Azeri + // # 0130; 0069; 0130; 0130; tr; # LATIN CAPITAL LETTER I WITH DOT ABOVE + // İ→i; + // # When lowercasing, remove dot_above in the sequence I + dot_above, which will turn into i. + // # This matches the behavior of the canonically equivalent I-dot_above + // # 0307; ; 0307; 0307; tr After_I; # COMBINING DOT ABOVE + // # When lowercasing, unless an I is before a dot_above, it turns into a dotless i. + // # 0049; 0131; 0049; 0049; tr Not_Before_Dot; # LATIN CAPITAL LETTER I + // I([^[:ccc=Not_Reordered:][:ccc=Above:]]*)\u0307 → i$1 ; + // I→ı ; + // ::Any-Lower(); + if c.hasPrefix("\u0130") { // İ + return c.writeString("i") + } + if c.src[c.pSrc] != 'I' { + return lower(c) + } + + // We ignore the lower-case I for now, but insert it later when we know + // which form we need. + start := c.pSrc + c.sz + + i := 0 +Loop: + // We check for up to n ignorables before \u0307. As \u0307 is an + // ignorable as well, n is maxIgnorable-1. + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccAbove: + if c.hasPrefix("\u0307") { + return c.writeString("i") && c.writeBytes(c.src[start:c.pSrc]) // ignore U+0307 + } + done = true + break Loop + case cccZero: + c.unreadRune() + done = true + break Loop + default: + // We'll write this rune after we know which starter to use. + } + } + if i == maxIgnorable { + done = true + } + return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done +} + +// aztrLowerSpan would be the same as isLower. + +func nlTitle(c *context) bool { + // From CLDR: + // # Special titlecasing for Dutch initial "ij". + // ::Any-Title(); + // # Fix up Ij at the beginning of a "word" (per Any-Title, notUAX #29) + // [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ; + if c.src[c.pSrc] != 'I' && c.src[c.pSrc] != 'i' { + return title(c) + } + + if !c.writeString("I") || !c.next() { + return false + } + if c.src[c.pSrc] == 'j' || c.src[c.pSrc] == 'J' { + return c.writeString("J") + } + c.unreadRune() + return true +} + +func nlTitleSpan(c *context) bool { + // From CLDR: + // # Special titlecasing for Dutch initial "ij". + // ::Any-Title(); + // # Fix up Ij at the beginning of a "word" (per Any-Title, notUAX #29) + // [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? 
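The aztrLower and nlTitle tailorings above correspond to two classic examples: the Turkish dotted/dotless i pair and the Dutch ij digraph. A sketch; expected outputs are illustrative:

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	// Turkish lowercasing: İ becomes i and I becomes dotless ı.
	fmt.Println(cases.Lower(language.Turkish).String("DİYARBAKIR"))
	// expected: diyarbakır

	// Dutch title casing: a leading "ij" is treated as one digraph.
	fmt.Println(cases.Title(language.Dutch).String("ijsland"))
	// expected: IJsland
}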
{ Ij } → IJ ; + if c.src[c.pSrc] != 'I' { + return isTitle(c) + } + if !c.next() || c.src[c.pSrc] == 'j' { + return false + } + if c.src[c.pSrc] != 'J' { + c.unreadRune() + } + return true +} + +// Not part of CLDR, but see http://unicode.org/cldr/trac/ticket/7078. +func afnlRewrite(c *context) { + if c.hasPrefix("'") || c.hasPrefix("’") { + c.isMidWord = true + } +} diff --git a/vendor/golang.org/x/text/cases/tables.go b/vendor/golang.org/x/text/cases/tables.go new file mode 100644 index 000000000..e6e95a685 --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables.go @@ -0,0 +1,2211 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +var xorData string = "" + // Size: 185 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" + + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x00\x0b(\x04\x00\x03\x04\x1e\x00\x03\x0a\x00\x02:" + + "\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<\x00\x01&\x00\x01*" + + "\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 1852 bytes + "\x00\x12\x10μΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x08I\x13\x18ʼnʼN\x11\x08sS" + + "\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj\x12\x12" + + "njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x18ǰJ̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10\x12DZDz" + + "\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x18Ȿ\x10\x18Ɀ\x10\x18Ɐ\x10\x18Ɑ\x10\x18Ɒ\x10" + + "\x18Ɜ\x10\x18Ɡ\x10\x18Ɥ\x10\x18Ɦ\x10\x18Ɪ\x10\x18Ɫ\x10\x18Ɬ\x10\x18Ɱ\x10" + + "\x18Ɽ\x10\x18Ʇ\x10\x18Ʝ\x10\x18Ʞ2\x10ιΙ\x160ΐΪ́\x160ΰΫ́\x12\x10σΣ" + + "\x12\x10βΒ\x12\x10θΘ\x12\x10φΦ\x12\x10πΠ\x12\x10κΚ\x12\x10ρΡ\x12\x10εΕ" + + "\x14$եւԵՒԵւ\x12\x10вВ\x12\x10дД\x12\x10оО\x12\x10сС\x12\x10тТ\x12\x10тТ" + + "\x12\x10ъЪ\x12\x10ѣѢ\x13\x18ꙋꙊ\x13\x18ẖH̱\x13\x18ẗT̈\x13\x18ẘW̊\x13" + + "\x18ẙY̊\x13\x18aʾAʾ\x13\x18ṡṠ\x12\x10ssß\x14 ὐΥ̓\x160ὒΥ̓̀\x160ὔΥ̓́" + + "\x160ὖΥ̓͂\x15+ἀιἈΙᾈ\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ" + + "\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ" + + "\x15\x1dἄιᾄἌΙ\x15\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ" + + "\x15+ἢιἪΙᾚ\x15+ἣιἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨ" + + "Ι\x15\x1dἡιᾑἩΙ\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15" + + "\x1dἦιᾖἮΙ\x15\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ" + + "\x15+ὥιὭΙᾭ\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ" + + "\x15\x1dὣιᾣὫΙ\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰι" + + "ᾺΙᾺͅ\x14#αιΑΙᾼ\x14$άιΆΙΆͅ\x14 ᾶΑ͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12\x10ι" + + "Ι\x15-ὴιῊΙῊͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14 ῆΗ͂\x166ῆιΗ͂Ιῌ͂\x14\x1cηιῃΗΙ" + + "\x160ῒΪ̀\x160ΐΪ́\x14 ῖΙ͂\x160ῗΪ͂\x160ῢΫ̀\x160ΰΫ́\x14 ῤΡ" + + "̓\x14 ῦΥ͂\x160ῧΫ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙῼ\x14$ώιΏΙΏͅ\x14 ῶΩ͂\x166ῶιΩ" + + "͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk\x12\x10åå\x12\x10ɫɫ\x12\x10ɽɽ" + + "\x10\x10Ⱥ\x10\x10Ⱦ\x12\x10ɑɑ\x12\x10ɱɱ\x12\x10ɐɐ\x12\x10ɒɒ\x12\x10ȿȿ\x12" + + "\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ\x12\x10ɡɡ\x12\x10ɬɬ\x12\x10ɪɪ\x12" + + 
"\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x12ffFFFf\x12\x12fiFIFi\x12\x12flFLFl\x13" + + "\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12stSTSt\x12\x12stSTSt\x14$մնՄՆՄն" + + "\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄԽՄխ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 11742 bytes (11.47 KiB). Checksum: 147a11466b427436. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 18: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 18 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 20 blocks, 1280 entries, 2560 bytes +// The third block is the zero block. 
+var caseValues = [1280]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x04cb, 0x105: 0x05c9, + 0x106: 0x06ca, 0x107: 0x078b, 0x108: 0x0889, 0x109: 0x098a, 0x10a: 0x0a4b, 0x10b: 0x0b49, + 0x10c: 0x0c4a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x0d0a, 0x131: 0x0e0b, 0x132: 0x0f09, 0x133: 0x100a, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x136a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x140a, 0x151: 0x14aa, + 0x152: 0x154a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x15ea, 0x15d: 0x0012, + 0x15e: 
0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x168a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x172a, 0x166: 0x17ca, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x186a, 0x16b: 0x190a, 0x16c: 0x19aa, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x1a4a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x1aea, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x0012, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x1b8a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x1c2a, + 0x19e: 0x1cca, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x1d6d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x1e2a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x1fea, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x21aa, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 
0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x226a, 0x251: 0x232a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x23ea, 0x256: 0x24aa, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x256a, 0x271: 0x262a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x26ea, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x0812, 0x281: 0x0812, 0x282: 0x0812, 0x283: 0x0812, 0x284: 0x0812, 0x285: 0x0812, + 0x288: 0x0813, 0x289: 0x0813, 0x28a: 0x0813, 0x28b: 0x0813, + 0x28c: 0x0813, 0x28d: 0x0813, 0x290: 0x372a, 0x291: 0x0812, + 0x292: 0x386a, 0x293: 0x0812, 0x294: 0x3a2a, 0x295: 0x0812, 0x296: 0x3bea, 0x297: 0x0812, + 0x299: 0x0813, 0x29b: 0x0813, 0x29d: 0x0813, + 0x29f: 0x0813, 0x2a0: 0x0812, 0x2a1: 0x0812, 0x2a2: 0x0812, 0x2a3: 0x0812, + 0x2a4: 0x0812, 0x2a5: 0x0812, 0x2a6: 0x0812, 0x2a7: 0x0812, 0x2a8: 0x0813, 0x2a9: 0x0813, + 0x2aa: 0x0813, 0x2ab: 0x0813, 0x2ac: 0x0813, 0x2ad: 0x0813, 0x2ae: 0x0813, 0x2af: 0x0813, + 0x2b0: 0x8b52, 0x2b1: 0x8b52, 0x2b2: 0x8e52, 0x2b3: 0x8e52, 0x2b4: 0x9152, 0x2b5: 0x9152, + 0x2b6: 0x9452, 0x2b7: 0x9452, 0x2b8: 0x9752, 0x2b9: 0x9752, 0x2ba: 0x9a52, 0x2bb: 0x9a52, + 0x2bc: 0x4d52, 0x2bd: 0x4d52, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3daa, 0x2c1: 0x3f8a, 0x2c2: 0x416a, 0x2c3: 0x434a, 0x2c4: 0x452a, 0x2c5: 0x470a, + 0x2c6: 0x48ea, 0x2c7: 0x4aca, 0x2c8: 0x4ca9, 0x2c9: 0x4e89, 0x2ca: 0x5069, 0x2cb: 0x5249, + 0x2cc: 0x5429, 0x2cd: 0x5609, 0x2ce: 0x57e9, 0x2cf: 0x59c9, 0x2d0: 0x5baa, 0x2d1: 0x5d8a, + 0x2d2: 0x5f6a, 0x2d3: 0x614a, 0x2d4: 0x632a, 0x2d5: 0x650a, 0x2d6: 0x66ea, 0x2d7: 0x68ca, + 0x2d8: 0x6aa9, 0x2d9: 0x6c89, 0x2da: 0x6e69, 0x2db: 0x7049, 0x2dc: 0x7229, 0x2dd: 0x7409, + 0x2de: 0x75e9, 0x2df: 0x77c9, 0x2e0: 0x79aa, 0x2e1: 0x7b8a, 0x2e2: 0x7d6a, 0x2e3: 0x7f4a, + 0x2e4: 0x812a, 0x2e5: 0x830a, 0x2e6: 0x84ea, 0x2e7: 0x86ca, 0x2e8: 0x88a9, 0x2e9: 0x8a89, + 0x2ea: 0x8c69, 0x2eb: 0x8e49, 0x2ec: 0x9029, 0x2ed: 0x9209, 0x2ee: 0x93e9, 0x2ef: 0x95c9, + 0x2f0: 0x0812, 0x2f1: 0x0812, 0x2f2: 0x97aa, 0x2f3: 0x99ca, 0x2f4: 0x9b6a, + 0x2f6: 0x9d2a, 0x2f7: 0x9e6a, 0x2f8: 0x0813, 0x2f9: 0x0813, 0x2fa: 0x8b53, 0x2fb: 0x8b53, + 0x2fc: 0xa0e9, 0x2fd: 0x0004, 0x2fe: 0xa28a, 0x2ff: 0x0004, + // Block 0xc, offset 0x300 + 0x300: 0x0004, 0x301: 0x0004, 0x302: 0xa34a, 0x303: 0xa56a, 0x304: 0xa70a, + 0x306: 0xa8ca, 0x307: 0xaa0a, 0x308: 0x8e53, 0x309: 0x8e53, 0x30a: 0x9153, 0x30b: 0x9153, + 0x30c: 0xac89, 0x30d: 0x0004, 0x30e: 0x0004, 0x30f: 0x0004, 0x310: 0x0812, 0x311: 0x0812, + 0x312: 0xae2a, 0x313: 0xafea, 0x316: 0xb1aa, 0x317: 0xb2ea, + 0x318: 0x0813, 0x319: 0x0813, 0x31a: 0x9453, 0x31b: 0x9453, 0x31d: 0x0004, + 0x31e: 0x0004, 0x31f: 0x0004, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0xb4aa, 0x323: 0xb66a, + 0x324: 0xb82a, 0x325: 0x0912, 0x326: 0xb96a, 0x327: 0xbaaa, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x9a53, 0x32b: 0x9a53, 0x32c: 0x0913, 0x32d: 0x0004, 0x32e: 0x0004, 0x32f: 0x0004, + 0x332: 0xbc6a, 0x333: 0xbe8a, 0x334: 0xc02a, + 0x336: 0xc1ea, 0x337: 0xc32a, 0x338: 0x9753, 0x339: 0x9753, 0x33a: 0x4d53, 0x33b: 0x4d53, + 0x33c: 
0xc5a9, 0x33d: 0x0004, 0x33e: 0x0004, + // Block 0xd, offset 0x340 + 0x342: 0x0013, + 0x347: 0x0013, 0x34a: 0x0012, 0x34b: 0x0013, + 0x34c: 0x0013, 0x34d: 0x0013, 0x34e: 0x0012, 0x34f: 0x0012, 0x350: 0x0013, 0x351: 0x0013, + 0x352: 0x0013, 0x353: 0x0012, 0x355: 0x0013, + 0x359: 0x0013, 0x35a: 0x0013, 0x35b: 0x0013, 0x35c: 0x0013, 0x35d: 0x0013, + 0x364: 0x0013, 0x366: 0xc74b, 0x368: 0x0013, + 0x36a: 0xc80b, 0x36b: 0xc88b, 0x36c: 0x0013, 0x36d: 0x0013, 0x36f: 0x0012, + 0x370: 0x0013, 0x371: 0x0013, 0x372: 0x9d53, 0x373: 0x0013, 0x374: 0x0012, 0x375: 0x0010, + 0x376: 0x0010, 0x377: 0x0010, 0x378: 0x0010, 0x379: 0x0012, + 0x37c: 0x0012, 0x37d: 0x0012, 0x37e: 0x0013, 0x37f: 0x0013, + // Block 0xe, offset 0x380 + 0x380: 0x1a13, 0x381: 0x1a13, 0x382: 0x1e13, 0x383: 0x1e13, 0x384: 0x1a13, 0x385: 0x1a13, + 0x386: 0x2613, 0x387: 0x2613, 0x388: 0x2a13, 0x389: 0x2a13, 0x38a: 0x2e13, 0x38b: 0x2e13, + 0x38c: 0x2a13, 0x38d: 0x2a13, 0x38e: 0x2613, 0x38f: 0x2613, 0x390: 0xa052, 0x391: 0xa052, + 0x392: 0xa352, 0x393: 0xa352, 0x394: 0xa652, 0x395: 0xa652, 0x396: 0xa352, 0x397: 0xa352, + 0x398: 0xa052, 0x399: 0xa052, 0x39a: 0x1a12, 0x39b: 0x1a12, 0x39c: 0x1e12, 0x39d: 0x1e12, + 0x39e: 0x1a12, 0x39f: 0x1a12, 0x3a0: 0x2612, 0x3a1: 0x2612, 0x3a2: 0x2a12, 0x3a3: 0x2a12, + 0x3a4: 0x2e12, 0x3a5: 0x2e12, 0x3a6: 0x2a12, 0x3a7: 0x2a12, 0x3a8: 0x2612, 0x3a9: 0x2612, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x6552, 0x3c1: 0x6552, 0x3c2: 0x6552, 0x3c3: 0x6552, 0x3c4: 0x6552, 0x3c5: 0x6552, + 0x3c6: 0x6552, 0x3c7: 0x6552, 0x3c8: 0x6552, 0x3c9: 0x6552, 0x3ca: 0x6552, 0x3cb: 0x6552, + 0x3cc: 0x6552, 0x3cd: 0x6552, 0x3ce: 0x6552, 0x3cf: 0x6552, 0x3d0: 0xa952, 0x3d1: 0xa952, + 0x3d2: 0xa952, 0x3d3: 0xa952, 0x3d4: 0xa952, 0x3d5: 0xa952, 0x3d6: 0xa952, 0x3d7: 0xa952, + 0x3d8: 0xa952, 0x3d9: 0xa952, 0x3da: 0xa952, 0x3db: 0xa952, 0x3dc: 0xa952, 0x3dd: 0xa952, + 0x3de: 0xa952, 0x3e0: 0x0113, 0x3e1: 0x0112, 0x3e2: 0xc94b, 0x3e3: 0x8853, + 0x3e4: 0xca0b, 0x3e5: 0xcaca, 0x3e6: 0xcb4a, 0x3e7: 0x0f13, 0x3e8: 0x0f12, 0x3e9: 0x0313, + 0x3ea: 0x0312, 0x3eb: 0x0713, 0x3ec: 0x0712, 0x3ed: 0xcbcb, 0x3ee: 0xcc8b, 0x3ef: 0xcd4b, + 0x3f0: 0xce0b, 0x3f1: 0x0012, 0x3f2: 0x0113, 0x3f3: 0x0112, 0x3f4: 0x0012, 0x3f5: 0x0313, + 0x3f6: 0x0312, 0x3f7: 0x0012, 0x3f8: 0x0012, 0x3f9: 0x0012, 0x3fa: 0x0012, 0x3fb: 0x0012, + 0x3fc: 0x0015, 0x3fd: 0x0015, 0x3fe: 0xcecb, 0x3ff: 0xcf8b, + // Block 0x10, offset 0x400 + 0x400: 0x0113, 0x401: 0x0112, 0x402: 0x0113, 0x403: 0x0112, 0x404: 0x0113, 0x405: 0x0112, + 0x406: 0x0113, 0x407: 0x0112, 0x408: 0x0014, 0x409: 0x0004, 0x40a: 0x0004, 0x40b: 0x0713, + 0x40c: 0x0712, 0x40d: 0xd04b, 0x40e: 0x0012, 0x40f: 0x0010, 0x410: 0x0113, 0x411: 0x0112, + 0x412: 0x0113, 0x413: 0x0112, 0x414: 0x0012, 0x415: 0x0012, 0x416: 0x0113, 0x417: 0x0112, + 0x418: 0x0113, 0x419: 0x0112, 0x41a: 0x0113, 0x41b: 0x0112, 0x41c: 0x0113, 0x41d: 0x0112, + 0x41e: 0x0113, 0x41f: 0x0112, 0x420: 0x0113, 0x421: 0x0112, 0x422: 0x0113, 0x423: 0x0112, + 0x424: 0x0113, 0x425: 0x0112, 0x426: 0x0113, 0x427: 0x0112, 0x428: 0x0113, 0x429: 0x0112, + 0x42a: 0xd10b, 0x42b: 0xd1cb, 0x42c: 0xd28b, 0x42d: 0xd34b, 0x42e: 0xd40b, + 0x430: 0xd4cb, 0x431: 0xd58b, 0x432: 0xd64b, 0x433: 0xac53, 0x434: 0x0113, 0x435: 0x0112, + 0x436: 0x0113, 0x437: 0x0112, + // Block 0x11, offset 0x440 + 0x440: 0xd70a, 0x441: 0xd80a, 0x442: 0xd90a, 0x443: 0xda0a, 0x444: 0xdb6a, 0x445: 0xdcca, + 0x446: 0xddca, + 0x453: 0xdeca, 0x454: 0xe08a, 0x455: 0xe24a, 0x456: 0xe40a, 0x457: 0xe5ca, + 0x45d: 0x0010, + 0x45e: 0x0034, 0x45f: 0x0010, 0x460: 0x0010, 0x461: 0x0010, 0x462: 0x0010, 0x463: 
0x0010, + 0x464: 0x0010, 0x465: 0x0010, 0x466: 0x0010, 0x467: 0x0010, 0x468: 0x0010, + 0x46a: 0x0010, 0x46b: 0x0010, 0x46c: 0x0010, 0x46d: 0x0010, 0x46e: 0x0010, 0x46f: 0x0010, + 0x470: 0x0010, 0x471: 0x0010, 0x472: 0x0010, 0x473: 0x0010, 0x474: 0x0010, 0x475: 0x0010, + 0x476: 0x0010, 0x478: 0x0010, 0x479: 0x0010, 0x47a: 0x0010, 0x47b: 0x0010, + 0x47c: 0x0010, 0x47e: 0x0010, + // Block 0x12, offset 0x480 + 0x480: 0x2213, 0x481: 0x2213, 0x482: 0x2613, 0x483: 0x2613, 0x484: 0x2213, 0x485: 0x2213, + 0x486: 0x2e13, 0x487: 0x2e13, 0x488: 0x2213, 0x489: 0x2213, 0x48a: 0x2613, 0x48b: 0x2613, + 0x48c: 0x2213, 0x48d: 0x2213, 0x48e: 0x3e13, 0x48f: 0x3e13, 0x490: 0x2213, 0x491: 0x2213, + 0x492: 0x2613, 0x493: 0x2613, 0x494: 0x2213, 0x495: 0x2213, 0x496: 0x2e13, 0x497: 0x2e13, + 0x498: 0x2213, 0x499: 0x2213, 0x49a: 0x2613, 0x49b: 0x2613, 0x49c: 0x2213, 0x49d: 0x2213, + 0x49e: 0xb553, 0x49f: 0xb553, 0x4a0: 0xb853, 0x4a1: 0xb853, 0x4a2: 0x2212, 0x4a3: 0x2212, + 0x4a4: 0x2612, 0x4a5: 0x2612, 0x4a6: 0x2212, 0x4a7: 0x2212, 0x4a8: 0x2e12, 0x4a9: 0x2e12, + 0x4aa: 0x2212, 0x4ab: 0x2212, 0x4ac: 0x2612, 0x4ad: 0x2612, 0x4ae: 0x2212, 0x4af: 0x2212, + 0x4b0: 0x3e12, 0x4b1: 0x3e12, 0x4b2: 0x2212, 0x4b3: 0x2212, 0x4b4: 0x2612, 0x4b5: 0x2612, + 0x4b6: 0x2212, 0x4b7: 0x2212, 0x4b8: 0x2e12, 0x4b9: 0x2e12, 0x4ba: 0x2212, 0x4bb: 0x2212, + 0x4bc: 0x2612, 0x4bd: 0x2612, 0x4be: 0x2212, 0x4bf: 0x2212, + // Block 0x13, offset 0x4c0 + 0x4c2: 0x0010, + 0x4c7: 0x0010, 0x4c9: 0x0010, 0x4cb: 0x0010, + 0x4cd: 0x0010, 0x4ce: 0x0010, 0x4cf: 0x0010, 0x4d1: 0x0010, + 0x4d2: 0x0010, 0x4d4: 0x0010, 0x4d7: 0x0010, + 0x4d9: 0x0010, 0x4db: 0x0010, 0x4dd: 0x0010, + 0x4df: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, + 0x4e4: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, 0x4e9: 0x0010, + 0x4ea: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f7: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. 
+var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x12, 0xc3: 0x13, 0xc4: 0x14, 0xc5: 0x15, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x16, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x17, 0xcc: 0x18, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x19, 0xd1: 0x1a, 0xd2: 0x1b, 0xd3: 0x1c, 0xd4: 0x1d, 0xd5: 0x1e, 0xd6: 0x1f, 0xd7: 0x20, + 0xd8: 0x21, 0xd9: 0x22, 0xda: 0x23, 0xdb: 0x24, 0xdc: 0x25, 0xdd: 0x26, 0xde: 0x27, 0xdf: 0x28, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x29, 0x121: 0x2a, 0x122: 0x2b, 0x123: 0x2c, 0x124: 0x2d, 0x125: 0x2e, 0x126: 0x2f, 0x127: 0x30, + 0x128: 0x31, 0x129: 0x32, 0x12a: 0x33, 0x12b: 0x34, 0x12c: 0x35, 0x12d: 0x36, 0x12e: 0x37, 0x12f: 0x38, + 0x130: 0x39, 0x131: 0x3a, 0x132: 0x3b, 0x133: 0x3c, 0x134: 0x3d, 0x135: 0x3e, 0x136: 0x3f, 0x137: 0x40, + 0x138: 0x41, 0x139: 0x42, 0x13a: 0x43, 0x13b: 0x44, 0x13c: 0x45, 0x13d: 0x46, 0x13e: 0x47, 0x13f: 0x48, + // Block 0x5, offset 0x140 + 0x140: 0x49, 0x141: 0x4a, 0x142: 0x4b, 0x143: 0x4c, 0x144: 0x23, 0x145: 0x23, 0x146: 0x23, 0x147: 0x23, + 0x148: 0x23, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x23, 0x152: 0x23, 0x153: 0x23, 0x154: 0x23, 0x155: 0x23, 0x156: 0x23, 0x157: 0x23, + 0x158: 0x23, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, + 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 0x176: 0x70, 0x177: 0x71, + 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x08, 0x17e: 0x09, 0x17f: 0x0a, + // Block 0x6, offset 0x180 + 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0b, 0x185: 0x79, 0x186: 0x7a, + 0x192: 0x7b, 0x193: 0x0c, + 0x1b0: 0x7c, 0x1b1: 0x0d, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, + 0x1b8: 0x82, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x23, 0x1c6: 0x87, + // Block 0x8, offset 0x200 + 0x200: 0x88, 0x201: 0x23, 0x202: 0x23, 0x203: 0x23, 0x204: 0x23, 0x205: 0x23, 0x206: 0x23, 0x207: 0x23, + 0x208: 0x23, 0x209: 0x23, 0x20a: 0x23, 0x20b: 0x23, 0x20c: 0x23, 0x20d: 0x23, 0x20e: 0x23, 0x20f: 0x23, + 0x210: 0x23, 0x211: 0x23, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x23, 0x215: 0x23, 0x216: 0x23, 0x217: 0x23, + 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x0e, 0x21f: 0x91, + 0x220: 0x92, 0x221: 0x93, 0x222: 0x23, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, + 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, + 0x230: 0x23, 0x231: 0x23, 0x232: 0x23, 0x233: 0x23, 0x234: 0x23, 0x235: 0x23, 0x236: 0x23, 0x237: 0x23, + 0x238: 0x23, 0x239: 0x23, 0x23a: 0x23, 0x23b: 0x23, 0x23c: 0x23, 0x23d: 0x23, 0x23e: 0x23, 0x23f: 0x23, + // Block 0x9, offset 0x240 + 0x240: 0x23, 0x241: 0x23, 0x242: 0x23, 0x243: 0x23, 0x244: 0x23, 0x245: 0x23, 0x246: 0x23, 0x247: 0x23, + 0x248: 0x23, 0x249: 0x23, 0x24a: 0x23, 0x24b: 0x23, 0x24c: 0x23, 0x24d: 0x23, 0x24e: 0x23, 0x24f: 0x23, + 0x250: 0x23, 0x251: 0x23, 0x252: 0x23, 0x253: 0x23, 0x254: 0x23, 0x255: 0x23, 0x256: 0x23, 0x257: 
0x23, + 0x258: 0x23, 0x259: 0x23, 0x25a: 0x23, 0x25b: 0x23, 0x25c: 0x23, 0x25d: 0x23, 0x25e: 0x23, 0x25f: 0x23, + 0x260: 0x23, 0x261: 0x23, 0x262: 0x23, 0x263: 0x23, 0x264: 0x23, 0x265: 0x23, 0x266: 0x23, 0x267: 0x23, + 0x268: 0x23, 0x269: 0x23, 0x26a: 0x23, 0x26b: 0x23, 0x26c: 0x23, 0x26d: 0x23, 0x26e: 0x23, 0x26f: 0x23, + 0x270: 0x23, 0x271: 0x23, 0x272: 0x23, 0x273: 0x23, 0x274: 0x23, 0x275: 0x23, 0x276: 0x23, 0x277: 0x23, + 0x278: 0x23, 0x279: 0x23, 0x27a: 0x23, 0x27b: 0x23, 0x27c: 0x23, 0x27d: 0x23, 0x27e: 0x23, 0x27f: 0x23, + // Block 0xa, offset 0x280 + 0x280: 0x23, 0x281: 0x23, 0x282: 0x23, 0x283: 0x23, 0x284: 0x23, 0x285: 0x23, 0x286: 0x23, 0x287: 0x23, + 0x288: 0x23, 0x289: 0x23, 0x28a: 0x23, 0x28b: 0x23, 0x28c: 0x23, 0x28d: 0x23, 0x28e: 0x23, 0x28f: 0x23, + 0x290: 0x23, 0x291: 0x23, 0x292: 0x23, 0x293: 0x23, 0x294: 0x23, 0x295: 0x23, 0x296: 0x23, 0x297: 0x23, + 0x298: 0x23, 0x299: 0x23, 0x29a: 0x23, 0x29b: 0x23, 0x29c: 0x23, 0x29d: 0x23, 0x29e: 0xa1, 0x29f: 0xa2, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x0f, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, + 0x2f0: 0x23, 0x2f1: 0x23, 0x2f2: 0x23, 0x2f3: 0x23, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, + 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x23, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, + // Block 0xc, offset 0x300 + 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x23, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, + 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, + 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, + 0x318: 0x23, 0x319: 0x23, 0x31a: 0x23, 0x31b: 0x23, 0x31c: 0xc2, 0x31d: 0xc3, + 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, + 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, + 0x330: 0x23, 0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, + // Block 0xd, offset 0x340 + 0x340: 0xd3, 0x341: 0xd4, 0x342: 0xd5, 0x343: 0xd6, 0x344: 0xd7, 0x345: 0xd8, 0x346: 0xd9, 0x347: 0xda, + 0x348: 0xdb, 0x34a: 0xdc, 0x34b: 0xdd, 0x34c: 0xde, 0x34d: 0xdf, + 0x350: 0xe0, 0x351: 0xe1, 0x352: 0xe2, 0x353: 0xe3, 0x356: 0xe4, 0x357: 0xe5, + 0x358: 0xe6, 0x359: 0xe7, 0x35a: 0xe8, 0x35b: 0xe9, 0x35c: 0xea, + 0x362: 0xeb, 0x363: 0xec, + 0x36b: 0xed, + 0x370: 0xee, 0x371: 0xef, 0x372: 0xf0, + // Block 0xe, offset 0x380 + 0x380: 0x23, 0x381: 0x23, 0x382: 0x23, 0x383: 0x23, 0x384: 0x23, 0x385: 0x23, 0x386: 0x23, 0x387: 0x23, + 0x388: 0x23, 0x389: 0x23, 0x38a: 0x23, 0x38b: 0x23, 0x38c: 0x23, 0x38d: 0x23, 0x38e: 0xf1, + 0x390: 0x23, 0x391: 0xf2, 0x392: 0x23, 0x393: 0x23, 0x394: 0x23, 0x395: 0xf3, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x23, 0x3c1: 0x23, 0x3c2: 0x23, 0x3c3: 0x23, 0x3c4: 0x23, 0x3c5: 0x23, 0x3c6: 0x23, 0x3c7: 0x23, + 0x3c8: 0x23, 0x3c9: 0x23, 0x3ca: 0x23, 0x3cb: 0x23, 0x3cc: 0x23, 0x3cd: 0x23, 0x3ce: 0x23, 0x3cf: 0x23, + 0x3d0: 0xf2, + // Block 0x10, offset 0x400 + 0x410: 0x23, 0x411: 0x23, 0x412: 0x23, 0x413: 0x23, 0x414: 0x23, 0x415: 0x23, 0x416: 0x23, 0x417: 0x23, + 0x418: 0x23, 0x419: 0xf4, + // Block 0x11, offset 0x440 + 0x460: 0x23, 0x461: 0x23, 0x462: 0x23, 0x463: 0x23, 0x464: 0x23, 0x465: 0x23, 0x466: 0x23, 0x467: 0x23, + 0x468: 0xed, 0x469: 0xf5, 0x46b: 0xf6, 0x46c: 0xf7, 0x46d: 0xf8, 0x46e: 0xf9, + 0x47c: 0x23, 0x47d: 0xfa, 0x47e: 0xfb, 0x47f: 0xfc, + // Block 0x12, offset 0x480 + 0x4b0: 0x23, 0x4b1: 0xfd, 0x4b2: 0xfe, + // Block 0x13, offset 0x4c0 + 0x4c5: 0xff, 0x4c6: 0x100, + 0x4c9: 0x101, + 0x4d0: 0x102, 0x4d1: 0x103, 0x4d2: 0x104, 0x4d3: 0x105, 0x4d4: 0x106, 0x4d5: 0x107, 0x4d6: 0x108, 0x4d7: 0x109, + 
0x4d8: 0x10a, 0x4d9: 0x10b, 0x4da: 0x10c, 0x4db: 0x10d, 0x4dc: 0x10e, 0x4dd: 0x10f, 0x4de: 0x110, 0x4df: 0x111, + 0x4e8: 0x112, 0x4e9: 0x113, 0x4ea: 0x114, + // Block 0x14, offset 0x500 + 0x500: 0x115, + 0x520: 0x23, 0x521: 0x23, 0x522: 0x23, 0x523: 0x116, 0x524: 0x10, 0x525: 0x117, + 0x538: 0x118, 0x539: 0x11, 0x53a: 0x119, + // Block 0x15, offset 0x540 + 0x544: 0x11a, 0x545: 0x11b, 0x546: 0x11c, + 0x54f: 0x11d, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x11e, 0x5c1: 0x11f, 0x5c4: 0x11f, 0x5c5: 0x11f, 0x5c6: 0x11f, 0x5c7: 0x120, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 272 entries, 544 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x3a, 0x3d, 0x41, 0x44, 0x48, 0x52, 0x54, 0x59, 0x69, 0x70, 0x75, 0x83, 0x84, 0x92, 0xa1, 0xab, 0xae, 0xb4, 0xbc, 0xbe, 0xc0, 0xce, 0xd4, 0xe2, 0xed, 0xf8, 0x103, 0x10f, 0x119, 0x124, 0x12f, 0x13b, 0x147, 0x14f, 0x157, 0x161, 0x16c, 0x178, 0x17e, 0x189, 0x18e, 0x196, 0x199, 0x19e, 0x1a2, 0x1a6, 0x1ad, 0x1b6, 0x1be, 0x1bf, 0x1c8, 0x1cf, 0x1d7, 0x1dd, 0x1e3, 0x1e8, 0x1ec, 0x1ef, 0x1f1, 0x1f4, 0x1f9, 0x1fa, 0x1fc, 0x1fe, 0x200, 0x207, 0x20c, 0x210, 0x219, 0x21c, 0x21f, 0x225, 0x226, 0x231, 0x232, 0x233, 0x238, 0x245, 0x24d, 0x255, 0x25e, 0x267, 0x270, 0x275, 0x278, 0x281, 0x28e, 0x290, 0x297, 0x299, 0x2a4, 0x2a5, 0x2b0, 0x2b8, 0x2c0, 0x2c6, 0x2c7, 0x2d5, 0x2da, 0x2dd, 0x2e2, 0x2e6, 0x2ec, 0x2f1, 0x2f4, 0x2f9, 0x2fe, 0x2ff, 0x305, 0x307, 0x308, 0x30a, 0x30c, 0x30f, 0x310, 0x312, 0x315, 0x31b, 0x31f, 0x321, 0x327, 0x32e, 0x332, 0x33b, 0x33c, 0x344, 0x348, 0x34d, 0x355, 0x35b, 0x361, 0x36b, 0x370, 0x379, 0x37f, 0x386, 0x38a, 0x392, 0x394, 0x396, 0x399, 0x39b, 0x39d, 0x39e, 0x39f, 0x3a1, 0x3a3, 0x3a9, 0x3ae, 0x3b0, 0x3b6, 0x3b9, 0x3bb, 0x3c1, 0x3c6, 0x3c8, 0x3c9, 0x3ca, 0x3cb, 0x3cd, 0x3cf, 0x3d1, 0x3d4, 0x3d6, 0x3d9, 0x3e1, 0x3e4, 0x3e8, 0x3f0, 0x3f2, 0x3f3, 0x3f4, 0x3f6, 0x3fc, 0x3fe, 0x3ff, 0x401, 0x403, 0x405, 0x412, 0x413, 0x414, 0x418, 0x41a, 0x41b, 0x41c, 0x41d, 0x41e, 0x422, 0x426, 0x42c, 0x42e, 0x435, 0x438, 0x43c, 0x442, 0x44b, 0x451, 0x457, 0x461, 0x46b, 0x46d, 0x474, 0x47a, 0x480, 0x486, 0x489, 0x48f, 0x492, 0x49a, 0x49b, 0x4a2, 0x4a3, 0x4a6, 0x4a7, 0x4ad, 0x4b0, 0x4b8, 0x4b9, 0x4ba, 0x4bb, 0x4bc, 0x4be, 0x4c0, 0x4c2, 0x4c6, 0x4c7, 0x4c9, 0x4ca, 0x4cb, 0x4cd, 0x4d2, 0x4d7, 0x4db, 0x4dc, 0x4df, 0x4e3, 0x4ee, 0x4f2, 0x4fa, 0x4ff, 0x503, 0x506, 0x50a, 0x50d, 0x510, 0x515, 0x519, 0x51d, 0x521, 0x525, 0x527, 0x529, 0x52c, 0x531, 0x533, 0x538, 0x541, 0x546, 0x547, 0x54a, 0x54b, 0x54c, 0x54e, 0x54f, 0x550} + +// sparseValues: 1360 entries, 5440 bytes +var sparseValues = [1360]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x002a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x00ea, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x01eb, lo: 0xb0, hi: 0xb0}, + {value: 0x02ea, lo: 0xb1, hi: 0xb1}, 
+ {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x034a, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x044a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x10cb, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x11cb, lo: 0xbe, hi: 0xbe}, + {value: 0x12ca, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0004, lo: 0x82, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x91}, + {value: 0x0004, lo: 0x92, hi: 0x96}, + {value: 0x0054, lo: 0x97, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0004, lo: 0xa5, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xac}, + {value: 0x0004, lo: 0xad, hi: 0xad}, + {value: 0x0014, lo: 0xae, hi: 0xae}, + {value: 0x0004, lo: 0xaf, hi: 0xbf}, + // Block 0x6, offset 0x3a + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x3d + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x41 + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x44 + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x48 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x52 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x54 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x59 + {value: 0x6852, lo: 0x80, hi: 0x86}, + {value: 0x27aa, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0024, lo: 0x92, hi: 0x95}, + {value: 0x0034, lo: 0x96, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x99}, + {value: 0x0034, lo: 0x9a, hi: 0x9b}, + {value: 0x0024, lo: 0x9c, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa7}, + {value: 0x0024, lo: 0xa8, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 
0x0034, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xbd}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe, offset 0x69 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xf, offset 0x70 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x10, offset 0x75 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x11, offset 0x83 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x12, offset 0x84 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x92 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x14, offset 0xa1 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x15, offset 0xab + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x16, offset 0xae + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + // Block 0x17, offset 0xb4 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, 
+ {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x18, offset 0xbc + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + // Block 0x19, offset 0xbe + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbd}, + // Block 0x1a, offset 0xc0 + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1b, offset 0xce + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1c, offset 0xd4 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1d, offset 0xe2 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0xed + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + // Block 0x1f, offset 0xf8 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x20, offset 0x103 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 
0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x21, offset 0x10f + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x22, offset 0x119 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + // Block 0x23, offset 0x124 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x24, offset 0x12f + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x25, offset 0x13b + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x26, offset 0x147 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x27, offset 0x14f + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x28, offset 0x157 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x29, offset 0x161 + {value: 0x0010, 
lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x2a, offset 0x16c + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2b, offset 0x178 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2c, offset 0x17e + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2d, offset 0x189 + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2e, offset 0x18e + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2f, offset 0x196 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x30, offset 0x199 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x31, offset 0x19e + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xb9}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x32, offset 0x1a2 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x33, offset 0x1a6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x34, offset 0x1ad + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + 
{value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x1b6 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x36, offset 0x1be + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x37, offset 0x1bf + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x38, offset 0x1c8 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x39, offset 0x1cf + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d7 + {value: 0x7053, lo: 0x80, hi: 0x85}, + {value: 0x7053, lo: 0x87, hi: 0x87}, + {value: 0x7053, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x3b, offset 0x1dd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3c, offset 0x1e3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3d, offset 0x1e8 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3e, offset 0x1ec + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3f, offset 0x1ef + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x40, offset 0x1f1 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x41, offset 0x1f4 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x42, offset 0x1f9 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x43, offset 0x1fa + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x44, offset 0x1fc + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x45, offset 0x1fe + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x46, offset 0x200 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 
0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x47, offset 0x207 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x48, offset 0x20c + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x49, offset 0x210 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x4a, offset 0x219 + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x4b, offset 0x21c + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb7}, + // Block 0x4c, offset 0x21f + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4d, offset 0x225 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4e, offset 0x226 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4f, offset 0x231 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x50, offset 0x232 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x51, offset 0x233 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x52, offset 0x238 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x53, offset 0x245 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0x54, offset 0x24d + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 
0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x55, offset 0x255 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x56, offset 0x25e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x57, offset 0x267 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x58, offset 0x270 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x59, offset 0x275 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x5a, offset 0x278 + {value: 0x296a, lo: 0x80, hi: 0x80}, + {value: 0x2a2a, lo: 0x81, hi: 0x81}, + {value: 0x2aea, lo: 0x82, hi: 0x82}, + {value: 0x2baa, lo: 0x83, hi: 0x83}, + {value: 0x2c6a, lo: 0x84, hi: 0x84}, + {value: 0x2d2a, lo: 0x85, hi: 0x85}, + {value: 0x2dea, lo: 0x86, hi: 0x86}, + {value: 0x2eaa, lo: 0x87, hi: 0x87}, + {value: 0x2f6a, lo: 0x88, hi: 0x88}, + // Block 0x5b, offset 0x281 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + // Block 0x5c, offset 0x28e + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5d, offset 0x290 + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8452, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8852, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x297 + {value: 0x0012, lo: 0x80, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5f, offset 0x299 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // 
Block 0x60, offset 0x2a4 + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x61, offset 0x2a5 + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x306a, lo: 0x96, hi: 0x96}, + {value: 0x316a, lo: 0x97, hi: 0x97}, + {value: 0x326a, lo: 0x98, hi: 0x98}, + {value: 0x336a, lo: 0x99, hi: 0x99}, + {value: 0x346a, lo: 0x9a, hi: 0x9a}, + {value: 0x356a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x366b, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x62, offset 0x2b0 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x63, offset 0x2b8 + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x64, offset 0x2c0 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x65, offset 0x2c6 + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x66, offset 0x2c7 + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x67, offset 0x2d5 + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0x9d52, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x68, offset 0x2da + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x69, offset 0x2dd + {value: 0xa053, lo: 0xb6, hi: 0xb7}, + {value: 0xa353, lo: 0xb8, hi: 0xb9}, + {value: 0xa653, lo: 0xba, hi: 0xbb}, + {value: 0xa353, lo: 0xbc, hi: 0xbd}, + {value: 0xa053, lo: 0xbe, hi: 0xbf}, + // Block 0x6a, offset 0x2e2 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xa953, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e6 + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6c, offset 0x2ec + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6d, offset 0x2f1 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, 
offset 0x2f4 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6f, offset 0x2f9 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x70, offset 0x2fe + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x71, offset 0x2ff + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x72, offset 0x305 + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x73, offset 0x307 + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x74, offset 0x308 + {value: 0x0010, lo: 0x85, hi: 0xad}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x75, offset 0x30a + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xba}, + // Block 0x76, offset 0x30c + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x77, offset 0x30f + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x78, offset 0x310 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x79, offset 0x312 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x7a, offset 0x315 + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x31b + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7c, offset 0x31f + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7d, offset 0x321 + {value: 0x0004, lo: 0x80, hi: 0x96}, + {value: 0x0014, lo: 0x97, hi: 0x9f}, + {value: 0x0004, lo: 0xa0, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7e, offset 0x327 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8453, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7f, offset 0x32e + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x80, offset 0x332 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + // Block 0x81, offset 0x33b + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x82, offset 0x33c + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 
0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x83, offset 0x344 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x84, offset 0x348 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x85, offset 0x34d + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x86, offset 0x355 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x87, offset 0x35b + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x88, offset 0x361 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x89, offset 0x36b + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x8a, offset 0x370 + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x8b, offset 0x379 + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x37f + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xac52, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa5}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8d, offset 0x386 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x38a + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + 
{value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8f, offset 0x392 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x90, offset 0x394 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x91, offset 0x396 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x92, offset 0x399 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x93, offset 0x39b + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x94, offset 0x39d + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x95, offset 0x39e + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x96, offset 0x39f + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x97, offset 0x3a1 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x98, offset 0x3a3 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x99, offset 0x3a9 + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x9a, offset 0x3ae + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x3b0 + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9c, offset 0x3b6 + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9d, offset 0x3b9 + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9e, offset 0x3bb + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9f, offset 0x3c1 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x3c6 + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0xa1, offset 0x3c8 + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa2, offset 0x3c9 + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa3, offset 0x3ca + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa4, offset 0x3cb + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa5, offset 0x3cd + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa6, offset 0x3cf + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xa7, offset 0x3d1 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa8, offset 0x3d4 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa9, offset 0x3d6 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 
0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xaa, offset 0x3d9 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xaf53, lo: 0x98, hi: 0x9f}, + {value: 0xb253, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3e1 + {value: 0xaf52, lo: 0x80, hi: 0x87}, + {value: 0xb252, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xac, offset 0x3e4 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb253, lo: 0xb0, hi: 0xb7}, + {value: 0xaf53, lo: 0xb8, hi: 0xbf}, + // Block 0xad, offset 0x3e8 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb252, lo: 0x98, hi: 0x9f}, + {value: 0xaf52, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xae, offset 0x3f0 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xaf, offset 0x3f2 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xb0, offset 0x3f3 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xb1, offset 0x3f4 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb2, offset 0x3f6 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x3fc + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb4, offset 0x3fe + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb5, offset 0x3ff + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb6, offset 0x401 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb7, offset 0x403 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb8, offset 0x405 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb3}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb9, offset 0x412 + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xba, offset 0x413 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xbb, offset 0x414 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xbc, offset 0x418 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbd, offset 0x41a + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbe, offset 0x41b + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbf, offset 0x41c + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xc0, offset 0x41d + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xc1, offset 0x41e + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 
0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc2, offset 0x422 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x426 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc4, offset 0x42c + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc5, offset 0x42e + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xc6, offset 0x435 + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xc7, offset 0x438 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xc8, offset 0x43c + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xc9, offset 0x442 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xca, offset 0x44b + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xcb, offset 0x451 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xcc, offset 0x457 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xcd, offset 0x461 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xce, offset 0x46b + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xcf, offset 0x46d + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + 
{value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd0, offset 0x474 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd1, offset 0x47a + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd2, offset 0x480 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x486 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd4, offset 0x489 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd5, offset 0x48f + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd6, offset 0x492 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0xd7, offset 0x49a + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xd8, offset 0x49b + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xd9, offset 0x4a2 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xda, offset 0x4a3 + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xdb, offset 0x4a6 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xdc, offset 0x4a7 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xdd, offset 0x4ad + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xde, offset 0x4b0 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xdf, offset 0x4b8 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xe0, offset 0x4b9 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xe1, offset 0x4ba + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xe2, offset 0x4bb + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xe3, 
offset 0x4bc + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xe4, offset 0x4be + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xe5, offset 0x4c0 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xe6, offset 0x4c2 + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xe7, offset 0x4c6 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xe8, offset 0x4c7 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0xbe}, + // Block 0xe9, offset 0x4c9 + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xea, offset 0x4ca + {value: 0x0014, lo: 0xa0, hi: 0xa0}, + // Block 0xeb, offset 0x4cb + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0xec, offset 0x4cd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0xed, offset 0x4d2 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0xee, offset 0x4d7 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0xef, offset 0x4db + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0xf0, offset 0x4dc + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0xf1, offset 0x4df + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0xf2, offset 0x4e3 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0xf3, offset 0x4ee + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0xf4, offset 0x4f2 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0xf5, offset 0x4fa + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0xf6, offset 0x4ff + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0xf7, offset 0x503 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0xf8, offset 0x506 + {value: 0x0012, lo: 
0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0xf9, offset 0x50a + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0xfa, offset 0x50d + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0xfb, offset 0x510 + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0xfc, offset 0x515 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0xfd, offset 0x519 + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0xfe, offset 0x51d + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0xff, offset 0x521 + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x100, offset 0x525 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x101, offset 0x527 + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x102, offset 0x529 + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x103, offset 0x52c + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x104, offset 0x531 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x105, offset 0x533 + {value: 0xb552, lo: 0x80, hi: 0x81}, + {value: 0xb852, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x106, offset 0x538 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x107, offset 0x541 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x108, offset 0x546 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x109, offset 0x547 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x10a, offset 0x54a + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x10b, offset 0x54b + {value: 0x0004, lo: 0xbb, hi: 0xbf}, + // Block 0x10c, offset 0x54c + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x10d, offset 0x54e + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x10e, offset 0x54f + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total 
table size 13811 bytes (13KiB); checksum: 4CC48DA3 diff --git a/vendor/golang.org/x/text/cases/trieval.go b/vendor/golang.org/x/text/cases/trieval.go new file mode 100644 index 000000000..fb221f843 --- /dev/null +++ b/vendor/golang.org/x/text/cases/trieval.go @@ -0,0 +1,215 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package cases + +// This file contains definitions for interpreting the trie value of the case +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds case information for a single rune. It is the value returned +// by a trie lookup. Most mapping information can be stored in a single 16-bit +// value. If not, for example when a rune is mapped to multiple runes, the value +// stores some basic case data and an index into an array with additional data. +// +// The per-rune values have the following format: +// +// if (exception) { +// 15..5 unsigned exception index +// 4 unused +// } else { +// 15..8 XOR pattern or index to XOR pattern for case mapping +// Only 13..8 are used for XOR patterns. +// 7 inverseFold (fold to upper, not to lower) +// 6 index: interpret the XOR pattern as an index +// or isMid if case mode is cIgnorableUncased. +// 5..4 CCC: zero (normal or break), above or other +// } +// 3 exception: interpret this value as an exception index +// (TODO: is this bit necessary? Probably implied from case mode.) +// 2..0 case mode +// +// For the non-exceptional cases, a rune must be either uncased, lowercase or +// uppercase. If the rune is cased, the XOR pattern maps either a lowercase +// rune to uppercase or an uppercase rune to lowercase (applied to the 10 +// least-significant bits of the rune). +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + casedMask = 0x0003 + fullCasedMask = 0x0007 + ignorableMask = 0x0006 + ignorableValue = 0x0004 + + inverseFoldBit = 1 << 7 + isMidBit = 1 << 6 + + exceptionBit = 1 << 3 + exceptionShift = 5 + numExceptionBits = 11 + + xorIndexBit = 1 << 6 + xorShift = 8 + + // There is no mapping if all xor bits and the exception bit are zero. + hasMappingMask = 0xff80 | exceptionBit +) + +// The case mode bits encodes the case type of a rune. This includes uncased, +// title, upper and lower case and case ignorable. (For a definition of these +// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare +// cases, a rune can be both cased and case-ignorable. This is encoded by +// cIgnorableCased. A rune of this type is always lower case. Some runes are +// cased while not having a mapping. +// +// A common pattern for scripts in the Unicode standard is for upper and lower +// case runes to alternate for increasing rune values (e.g. the accented Latin +// ranges starting from U+0100 and U+1E00 among others and some Cyrillic +// characters). We use this property by defining a cXORCase mode, where the case +// mode (always upper or lower case) is derived from the rune value. As the XOR +// pattern for case mappings is often identical for successive runes, using +// cXORCase can result in large series of identical trie values. This, in turn, +// allows us to better compress the trie blocks. 
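// Editorial sketch, not part of the vendored file: the bit layout documented
// above reduces to a few masks and shifts. The helper name below is made up
// for illustration; it only restates how the constants defined in this file
// carve up an info value.
func sketchDecodeInfo(v info) (mode info, isException bool, excIndex, xorPattern uint16) {
	mode = v & fullCasedMask               // bits 2..0: case mode
	isException = v&exceptionBit != 0      // bit 3: interpret the value as an exception index
	excIndex = uint16(v) >> exceptionShift // bits 15..5: index into the exceptions slice
	xorPattern = uint16(v) >> xorShift     // bits 15..8: XOR pattern (or pattern index) in the non-exception case
	return mode, isException, excIndex, xorPattern
}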
+const ( + cUncased info = iota // 000 + cTitle // 001 + cLower // 010 + cUpper // 011 + cIgnorableUncased // 100 + cIgnorableCased // 101 // lower case if mappings exist + cXORCase // 11x // case is cLower | ((rune&1) ^ x) + + maxCaseMode = cUpper +) + +func (c info) isCased() bool { + return c&casedMask != 0 +} + +func (c info) isCaseIgnorable() bool { + return c&ignorableMask == ignorableValue +} + +func (c info) isNotCasedAndNotCaseIgnorable() bool { + return c&fullCasedMask == 0 +} + +func (c info) isCaseIgnorableAndNotCased() bool { + return c&fullCasedMask == cIgnorableUncased +} + +func (c info) isMid() bool { + return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased +} + +// The case mapping implementation will need to know about various Canonical +// Combining Class (CCC) values. We encode two of these in the trie value: +// cccZero (0) and cccAbove (230). If the value is cccOther, it means that +// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that +// the rune also has the break category Break (see below). +const ( + cccBreak info = iota << 4 + cccZero + cccAbove + cccOther + + cccMask = cccBreak | cccZero | cccAbove | cccOther +) + +const ( + starter = 0 + above = 230 + iotaSubscript = 240 +) + +// The exceptions slice holds data that does not fit in a normal info entry. +// The entry is pointed to by the exception index in an entry. It has the +// following format: +// +// Header +// byte 0: +// 7..6 unused +// 5..4 CCC type (same bits as entry) +// 3 unused +// 2..0 length of fold +// +// byte 1: +// 7..6 unused +// 5..3 length of 1st mapping of case type +// 2..0 length of 2nd mapping of case type +// +// case 1st 2nd +// lower -> upper, title +// upper -> lower, title +// title -> lower, upper +// +// Lengths with the value 0x7 indicate no value and implies no change. +// A length of 0 indicates a mapping to zero-length string. +// +// Body bytes: +// case folding bytes +// lowercase mapping bytes +// uppercase mapping bytes +// titlecase mapping bytes +// closure mapping bytes (for NFKC_Casefold). (TODO) +// +// Fallbacks: +// missing fold -> lower +// missing title -> upper +// all missing -> original rune +// +// exceptions starts with a dummy byte to enforce that there is no zero index +// value. +const ( + lengthMask = 0x07 + lengthBits = 3 + noChange = 0 +) + +// References to generated trie. + +var trie = newCaseTrie(0) + +var sparse = sparseBlocks{ + values: sparseValues[:], + offsets: sparseOffsets[:], +} + +// Sparse block lookup code. + +// valueRange is an entry in a sparse block. +type valueRange struct { + value uint16 + lo, hi byte +} + +type sparseBlocks struct { + values []valueRange + offsets []uint16 +} + +// lookup returns the value from values block n for byte b using binary search. +func (s *sparseBlocks) lookup(n uint32, b byte) uint16 { + lo := s.offsets[n] + hi := s.offsets[n+1] + for lo < hi { + m := lo + (hi-lo)/2 + r := s.values[m] + if r.lo <= b && b <= r.hi { + return r.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} + +// lastRuneForTesting is the last rune used for testing. Everything after this +// is boring. +const lastRuneForTesting = rune(0x1FFFF) diff --git a/vendor/golang.org/x/text/internal/internal.go b/vendor/golang.org/x/text/internal/internal.go new file mode 100644 index 000000000..eac832850 --- /dev/null +++ b/vendor/golang.org/x/text/internal/internal.go @@ -0,0 +1,51 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package internal contains non-exported functionality that are used by +// packages in the text repository. +package internal // import "golang.org/x/text/internal" + +import ( + "sort" + + "golang.org/x/text/language" +) + +// SortTags sorts tags in place. +func SortTags(tags []language.Tag) { + sort.Sort(sorter(tags)) +} + +type sorter []language.Tag + +func (s sorter) Len() int { + return len(s) +} + +func (s sorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s sorter) Less(i, j int) bool { + return s[i].String() < s[j].String() +} + +// UniqueTags sorts and filters duplicate tags in place and returns a slice with +// only unique tags. +func UniqueTags(tags []language.Tag) []language.Tag { + if len(tags) <= 1 { + return tags + } + SortTags(tags) + k := 0 + for i := 1; i < len(tags); i++ { + if tags[k].String() < tags[i].String() { + k++ + tags[k] = tags[i] + } + } + return tags[:k+1] +} diff --git a/vendor/golang.org/x/text/internal/match.go b/vendor/golang.org/x/text/internal/match.go new file mode 100644 index 000000000..a67fcaca1 --- /dev/null +++ b/vendor/golang.org/x/text/internal/match.go @@ -0,0 +1,67 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// This file contains matchers that implement CLDR inheritance. +// +// See http://unicode.org/reports/tr35/#Locale_Inheritance. +// +// Some of the inheritance described in this document is already handled by +// the cldr package. + +import ( + "golang.org/x/text/language" +) + +// TODO: consider if (some of the) matching algorithm needs to be public after +// getting some feel about what is generic and what is specific. + +// NewInheritanceMatcher returns a matcher that matches based on the inheritance +// chain. +// +// The matcher uses canonicalization and the parent relationship to find a +// match. The resulting match will always be either Und or a language with the +// same language and script as the requested language. It will not match +// languages for which there is understood to be mutual or one-directional +// intelligibility. +// +// A Match will indicate an Exact match if the language matches after +// canonicalization and High if the matched tag is a parent. +func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher { + tags := &InheritanceMatcher{make(map[language.Tag]int)} + for i, tag := range t { + ct, err := language.All.Canonicalize(tag) + if err != nil { + ct = tag + } + tags.index[ct] = i + } + return tags +} + +type InheritanceMatcher struct { + index map[language.Tag]int +} + +func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) { + for _, t := range want { + ct, err := language.All.Canonicalize(t) + if err != nil { + ct = t + } + conf := language.Exact + for { + if index, ok := m.index[ct]; ok { + return ct, index, conf + } + if ct == language.Und { + break + } + ct = ct.Parent() + conf = language.High + } + } + return language.Und, 0, language.No +} diff --git a/vendor/golang.org/x/text/internal/tables.go b/vendor/golang.org/x/text/internal/tables.go new file mode 100644 index 000000000..7fb15f6b8 --- /dev/null +++ b/vendor/golang.org/x/text/internal/tables.go @@ -0,0 +1,116 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
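// Editorial sketch, not part of the diff: minimal intended use of the
// InheritanceMatcher defined in match.go above. "internal" packages are only
// importable from inside the x/text module, so this is purely illustrative;
// the tags and the helper name are arbitrary examples.
func sketchInheritanceMatch() (language.Tag, int, language.Confidence) {
	supported := []language.Tag{language.MustParse("en"), language.MustParse("pt")}
	m := NewInheritanceMatcher(supported)
	// "pt-BR" is not in supported, but its CLDR parent "pt" is, so Match walks
	// the parent chain and should report "pt", index 1, language.High.
	return m.Match(language.MustParse("pt-BR"))
}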
+ +package internal + +// Parent maps a compact index of a tag to the compact index of the parent of +// this tag. +var Parent = []uint16{ // 752 elements + // Entry 0 - 3F + 0x0000, 0x0053, 0x00e5, 0x0000, 0x0003, 0x0003, 0x0000, 0x0006, + 0x0000, 0x0008, 0x0000, 0x000a, 0x0000, 0x000c, 0x000c, 0x000c, + 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, + 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, + 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, + 0x000c, 0x0000, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x002e, + 0x0000, 0x0000, 0x0031, 0x0030, 0x0030, 0x0000, 0x0035, 0x0000, + 0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x003d, 0x0000, + // Entry 40 - 7F + 0x0000, 0x0040, 0x0000, 0x0042, 0x0042, 0x0000, 0x0045, 0x0045, + 0x0000, 0x0048, 0x0000, 0x004a, 0x0000, 0x0000, 0x004d, 0x004c, + 0x004c, 0x0000, 0x0051, 0x0051, 0x0051, 0x0051, 0x0000, 0x0056, + 0x0000, 0x0058, 0x0000, 0x005a, 0x0000, 0x005c, 0x005c, 0x0000, + 0x005f, 0x0000, 0x0061, 0x0000, 0x0063, 0x0000, 0x0065, 0x0065, + 0x0000, 0x0068, 0x0000, 0x006a, 0x006a, 0x006a, 0x006a, 0x006a, + 0x006a, 0x006a, 0x0000, 0x0072, 0x0000, 0x0074, 0x0000, 0x0076, + 0x0000, 0x0000, 0x0079, 0x0000, 0x007b, 0x0000, 0x007d, 0x0000, + // Entry 80 - BF + 0x007f, 0x007f, 0x0000, 0x0082, 0x0082, 0x0000, 0x0085, 0x0086, + 0x0086, 0x0086, 0x0085, 0x0087, 0x0086, 0x0086, 0x0086, 0x0085, + 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086, + 0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0087, 0x0086, 0x0086, + 0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, + 0x0086, 0x0086, 0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, + 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, + 0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0086, 0x0085, 0x0086, + // Entry C0 - FF + 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, + 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0085, + 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0086, + 0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, + 0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0085, 0x0086, 0x0086, + 0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0000, 0x00ee, + 0x0000, 0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1, + 0x00f1, 0x00f1, 0x00f0, 0x00f1, 0x00f0, 0x00f0, 0x00f1, 0x00f1, + // Entry 100 - 13F + 0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f0, 0x00f1, 0x00f1, + 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x0000, 0x010c, 0x0000, 0x010e, + 0x0000, 0x0110, 0x0000, 0x0112, 0x0112, 0x0000, 0x0115, 0x0115, + 0x0115, 0x0115, 0x0000, 0x011a, 0x0000, 0x011c, 0x0000, 0x011e, + 0x011e, 0x0000, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, + 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, + 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, + 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, + // Entry 140 - 17F + 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, + 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, + 0x0000, 0x0150, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156, + 0x0000, 0x0158, 0x0000, 0x015a, 0x015a, 0x015a, 0x0000, 0x015e, + 0x0000, 0x0000, 0x0161, 0x0000, 0x0163, 0x0000, 0x0165, 0x0165, + 0x0165, 0x0000, 0x0169, 0x0000, 0x016b, 0x0000, 0x016d, 0x0000, + 0x016f, 0x016f, 0x0000, 0x0172, 0x0000, 0x0174, 0x0000, 0x0176, + 0x0000, 0x0178, 0x0000, 0x017a, 0x0000, 0x017c, 0x0000, 0x017e, + // Entry 180 - 1BF + 0x0000, 0x0180, 0x0180, 0x0180, 0x0000, 0x0000, 0x0185, 0x0000, + 0x0000, 0x0188, 0x0000, 
0x018a, 0x0000, 0x0000, 0x018d, 0x0000, + 0x018f, 0x0000, 0x0000, 0x0192, 0x0000, 0x0000, 0x0195, 0x0000, + 0x0197, 0x0000, 0x0199, 0x0000, 0x019b, 0x0000, 0x019d, 0x0000, + 0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000, + 0x01a7, 0x0000, 0x01a9, 0x01a9, 0x0000, 0x01ac, 0x0000, 0x01ae, + 0x0000, 0x01b0, 0x0000, 0x01b2, 0x0000, 0x01b4, 0x0000, 0x0000, + 0x01b7, 0x0000, 0x01b9, 0x0000, 0x01bb, 0x0000, 0x01bd, 0x0000, + // Entry 1C0 - 1FF + 0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x01c3, 0x01c3, 0x01c3, + 0x0000, 0x01c8, 0x0000, 0x01ca, 0x01ca, 0x0000, 0x01cd, 0x0000, + 0x01cf, 0x0000, 0x01d1, 0x0000, 0x01d3, 0x0000, 0x01d5, 0x0000, + 0x01d7, 0x01d7, 0x0000, 0x01da, 0x0000, 0x01dc, 0x0000, 0x01de, + 0x0000, 0x01e0, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6, + 0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x01ec, 0x01ec, + 0x0000, 0x01f0, 0x0000, 0x01f2, 0x0000, 0x01f4, 0x0000, 0x01f6, + 0x0000, 0x0000, 0x01f9, 0x0000, 0x01fb, 0x01fb, 0x0000, 0x01fe, + // Entry 200 - 23F + 0x0000, 0x0200, 0x0200, 0x0000, 0x0203, 0x0203, 0x0000, 0x0206, + 0x0206, 0x0206, 0x0206, 0x0206, 0x0206, 0x0206, 0x0000, 0x020e, + 0x0000, 0x0210, 0x0000, 0x0212, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0218, 0x0000, 0x0000, 0x021b, 0x0000, 0x021d, 0x021d, + 0x0000, 0x0220, 0x0000, 0x0222, 0x0222, 0x0000, 0x0000, 0x0226, + 0x0225, 0x0225, 0x0000, 0x0000, 0x022b, 0x0000, 0x022d, 0x0000, + 0x022f, 0x0000, 0x023b, 0x0231, 0x023b, 0x023b, 0x023b, 0x023b, + 0x023b, 0x023b, 0x023b, 0x0231, 0x023b, 0x023b, 0x0000, 0x023e, + // Entry 240 - 27F + 0x023e, 0x023e, 0x0000, 0x0242, 0x0000, 0x0244, 0x0000, 0x0246, + 0x0246, 0x0000, 0x0249, 0x0000, 0x024b, 0x024b, 0x024b, 0x024b, + 0x024b, 0x024b, 0x0000, 0x0252, 0x0000, 0x0254, 0x0000, 0x0256, + 0x0000, 0x0258, 0x0000, 0x025a, 0x0000, 0x0000, 0x025d, 0x025d, + 0x025d, 0x0000, 0x0261, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000, + 0x0000, 0x0268, 0x0267, 0x0267, 0x0000, 0x026c, 0x0000, 0x026e, + 0x0000, 0x0270, 0x0000, 0x0000, 0x0000, 0x0000, 0x0275, 0x0000, + 0x0000, 0x0278, 0x0000, 0x027a, 0x027a, 0x027a, 0x027a, 0x0000, + // Entry 280 - 2BF + 0x027f, 0x027f, 0x027f, 0x0000, 0x0283, 0x0283, 0x0283, 0x0283, + 0x0283, 0x0000, 0x0289, 0x0289, 0x0289, 0x0289, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0291, 0x0291, 0x0291, 0x0000, 0x0295, 0x0295, + 0x0295, 0x0295, 0x0000, 0x0000, 0x029b, 0x029b, 0x029b, 0x029b, + 0x0000, 0x02a0, 0x0000, 0x02a2, 0x02a2, 0x0000, 0x02a5, 0x0000, + 0x02a7, 0x02a7, 0x0000, 0x0000, 0x02ab, 0x0000, 0x0000, 0x02ae, + 0x0000, 0x02b0, 0x02b0, 0x0000, 0x0000, 0x02b4, 0x0000, 0x02b6, + 0x0000, 0x02b8, 0x0000, 0x02ba, 0x0000, 0x02bc, 0x02bc, 0x0000, + // Entry 2C0 - 2FF + 0x0000, 0x02c0, 0x0000, 0x02c2, 0x02bf, 0x02bf, 0x0000, 0x0000, + 0x02c7, 0x02c6, 0x02c6, 0x0000, 0x0000, 0x02cc, 0x0000, 0x02ce, + 0x0000, 0x02d0, 0x0000, 0x0000, 0x02d3, 0x0000, 0x0000, 0x0000, + 0x02d7, 0x0000, 0x02d9, 0x0000, 0x02db, 0x0000, 0x02dd, 0x02dd, + 0x0000, 0x02e0, 0x0000, 0x02e2, 0x0000, 0x02e4, 0x02e4, 0x02e4, + 0x02e4, 0x02e4, 0x0000, 0x02ea, 0x02eb, 0x02ea, 0x0000, 0x02ee, +} // Size: 1528 bytes + +// Total table size 1528 bytes (1KiB); checksum: B99CF952 diff --git a/vendor/golang.org/x/text/internal/tag/tag.go b/vendor/golang.org/x/text/internal/tag/tag.go new file mode 100644 index 000000000..b5d348891 --- /dev/null +++ b/vendor/golang.org/x/text/internal/tag/tag.go @@ -0,0 +1,100 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package tag contains functionality handling tags and related data. +package tag // import "golang.org/x/text/internal/tag" + +import "sort" + +// An Index converts tags to a compact numeric value. +// +// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can +// be used to store additional information about the tag. +type Index string + +// Elem returns the element data at the given index. +func (s Index) Elem(x int) string { + return string(s[x*4 : x*4+4]) +} + +// Index reports the index of the given key or -1 if it could not be found. +// Only the first len(key) bytes from the start of the 4-byte entries will be +// considered for the search and the first match in Index will be returned. +func (s Index) Index(key []byte) int { + n := len(key) + // search the index of the first entry with an equal or higher value than + // key in s. + index := sort.Search(len(s)/4, func(i int) bool { + return cmp(s[i*4:i*4+n], key) != -1 + }) + i := index * 4 + if cmp(s[i:i+len(key)], key) != 0 { + return -1 + } + return index +} + +// Next finds the next occurrence of key after index x, which must have been +// obtained from a call to Index using the same key. It returns x+1 or -1. +func (s Index) Next(key []byte, x int) int { + if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 { + return x + } + return -1 +} + +// cmp returns an integer comparing a and b lexicographically. +func cmp(a Index, b []byte) int { + n := len(a) + if len(b) < n { + n = len(b) + } + for i, c := range b[:n] { + switch { + case a[i] > c: + return 1 + case a[i] < c: + return -1 + } + } + switch { + case len(a) < len(b): + return -1 + case len(a) > len(b): + return 1 + } + return 0 +} + +// Compare returns an integer comparing a and b lexicographically. +func Compare(a string, b []byte) int { + return cmp(Index(a), b) +} + +// FixCase reformats b to the same pattern of cases as form. +// If returns false if string b is malformed. +func FixCase(form string, b []byte) bool { + if len(form) != len(b) { + return false + } + for i, c := range b { + if form[i] <= 'Z' { + if c >= 'a' { + c -= 'z' - 'Z' + } + if c < 'A' || 'Z' < c { + return false + } + } else { + if c <= 'Z' { + c += 'z' - 'Z' + } + if c < 'a' || 'z' < c { + return false + } + } + b[i] = c + } + return true +} diff --git a/vendor/golang.org/x/text/language/common.go b/vendor/golang.org/x/text/language/common.go new file mode 100644 index 000000000..9d86e1855 --- /dev/null +++ b/vendor/golang.org/x/text/language/common.go @@ -0,0 +1,16 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package language + +// This file contains code common to the maketables.go and the package code. + +// langAliasType is the type of an alias in langAliasMap. +type langAliasType int8 + +const ( + langDeprecated langAliasType = iota + langMacro + langLegacy + + langAliasTypeUnknown langAliasType = -1 +) diff --git a/vendor/golang.org/x/text/language/coverage.go b/vendor/golang.org/x/text/language/coverage.go new file mode 100644 index 000000000..101fd23c1 --- /dev/null +++ b/vendor/golang.org/x/text/language/coverage.go @@ -0,0 +1,197 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "fmt" + "sort" +) + +// The Coverage interface is used to define the level of coverage of an +// internationalization service. Note that not all types are supported by all +// services. 
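// Editorial sketch, not part of the diff: querying the 4-byte-entry Index
// from internal/tag/tag.go above. It assumes an import of
// "golang.org/x/text/internal/tag" (only possible inside the x/text module);
// the index string is a made-up example of three sorted 4-byte entries.
func sketchTagIndex() (int, string) {
	idx := tag.Index("arg\x00eng\x00nld\x00")
	i := idx.Index([]byte("eng")) // binary search on the first len(key) bytes; yields 1 here
	return i, idx.Elem(i)         // Elem returns the full 4-byte entry, "eng\x00"
}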
As lists may be generated on the fly, it is recommended that users +// of a Coverage cache the results. +type Coverage interface { + // Tags returns the list of supported tags. + Tags() []Tag + + // BaseLanguages returns the list of supported base languages. + BaseLanguages() []Base + + // Scripts returns the list of supported scripts. + Scripts() []Script + + // Regions returns the list of supported regions. + Regions() []Region +} + +var ( + // Supported defines a Coverage that lists all supported subtags. Tags + // always returns nil. + Supported Coverage = allSubtags{} +) + +// TODO: +// - Support Variants, numbering systems. +// - CLDR coverage levels. +// - Set of common tags defined in this package. + +type allSubtags struct{} + +// Regions returns the list of supported regions. As all regions are in a +// consecutive range, it simply returns a slice of numbers in increasing order. +// The "undefined" region is not returned. +func (s allSubtags) Regions() []Region { + reg := make([]Region, numRegions) + for i := range reg { + reg[i] = Region{regionID(i + 1)} + } + return reg +} + +// Scripts returns the list of supported scripts. As all scripts are in a +// consecutive range, it simply returns a slice of numbers in increasing order. +// The "undefined" script is not returned. +func (s allSubtags) Scripts() []Script { + scr := make([]Script, numScripts) + for i := range scr { + scr[i] = Script{scriptID(i + 1)} + } + return scr +} + +// BaseLanguages returns the list of all supported base languages. It generates +// the list by traversing the internal structures. +func (s allSubtags) BaseLanguages() []Base { + base := make([]Base, 0, numLanguages) + for i := 0; i < langNoIndexOffset; i++ { + // We included "und" already for the value 0. + if i != nonCanonicalUnd { + base = append(base, Base{langID(i)}) + } + } + i := langNoIndexOffset + for _, v := range langNoIndex { + for k := 0; k < 8; k++ { + if v&1 == 1 { + base = append(base, Base{langID(i)}) + } + v >>= 1 + i++ + } + } + return base +} + +// Tags always returns nil. +func (s allSubtags) Tags() []Tag { + return nil +} + +// coverage is used used by NewCoverage which is used as a convenient way for +// creating Coverage implementations for partially defined data. Very often a +// package will only need to define a subset of slices. coverage provides a +// convenient way to do this. Moreover, packages using NewCoverage, instead of +// their own implementation, will not break if later new slice types are added. +type coverage struct { + tags func() []Tag + bases func() []Base + scripts func() []Script + regions func() []Region +} + +func (s *coverage) Tags() []Tag { + if s.tags == nil { + return nil + } + return s.tags() +} + +// bases implements sort.Interface and is used to sort base languages. +type bases []Base + +func (b bases) Len() int { + return len(b) +} + +func (b bases) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b bases) Less(i, j int) bool { + return b[i].langID < b[j].langID +} + +// BaseLanguages returns the result from calling s.bases if it is specified or +// otherwise derives the set of supported base languages from tags. 
+func (s *coverage) BaseLanguages() []Base { + if s.bases == nil { + tags := s.Tags() + if len(tags) == 0 { + return nil + } + a := make([]Base, len(tags)) + for i, t := range tags { + a[i] = Base{langID(t.lang)} + } + sort.Sort(bases(a)) + k := 0 + for i := 1; i < len(a); i++ { + if a[k] != a[i] { + k++ + a[k] = a[i] + } + } + return a[:k+1] + } + return s.bases() +} + +func (s *coverage) Scripts() []Script { + if s.scripts == nil { + return nil + } + return s.scripts() +} + +func (s *coverage) Regions() []Region { + if s.regions == nil { + return nil + } + return s.regions() +} + +// NewCoverage returns a Coverage for the given lists. It is typically used by +// packages providing internationalization services to define their level of +// coverage. A list may be of type []T or func() []T, where T is either Tag, +// Base, Script or Region. The returned Coverage derives the value for Bases +// from Tags if no func or slice for []Base is specified. For other unspecified +// types the returned Coverage will return nil for the respective methods. +func NewCoverage(list ...interface{}) Coverage { + s := &coverage{} + for _, x := range list { + switch v := x.(type) { + case func() []Base: + s.bases = v + case func() []Script: + s.scripts = v + case func() []Region: + s.regions = v + case func() []Tag: + s.tags = v + case []Base: + s.bases = func() []Base { return v } + case []Script: + s.scripts = func() []Script { return v } + case []Region: + s.regions = func() []Region { return v } + case []Tag: + s.tags = func() []Tag { return v } + default: + panic(fmt.Sprintf("language: unsupported set type %T", v)) + } + } + return s +} diff --git a/vendor/golang.org/x/text/language/go1_1.go b/vendor/golang.org/x/text/language/go1_1.go new file mode 100644 index 000000000..380f4c09f --- /dev/null +++ b/vendor/golang.org/x/text/language/go1_1.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.2 + +package language + +import "sort" + +func sortStable(s sort.Interface) { + ss := stableSort{ + s: s, + pos: make([]int, s.Len()), + } + for i := range ss.pos { + ss.pos[i] = i + } + sort.Sort(&ss) +} + +type stableSort struct { + s sort.Interface + pos []int +} + +func (s *stableSort) Len() int { + return len(s.pos) +} + +func (s *stableSort) Less(i, j int) bool { + return s.s.Less(i, j) || !s.s.Less(j, i) && s.pos[i] < s.pos[j] +} + +func (s *stableSort) Swap(i, j int) { + s.s.Swap(i, j) + s.pos[i], s.pos[j] = s.pos[j], s.pos[i] +} diff --git a/vendor/golang.org/x/text/language/go1_2.go b/vendor/golang.org/x/text/language/go1_2.go new file mode 100644 index 000000000..38268c57a --- /dev/null +++ b/vendor/golang.org/x/text/language/go1_2.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.2 + +package language + +import "sort" + +var sortStable = sort.Stable diff --git a/vendor/golang.org/x/text/language/index.go b/vendor/golang.org/x/text/language/index.go new file mode 100644 index 000000000..b370ffaac --- /dev/null +++ b/vendor/golang.org/x/text/language/index.go @@ -0,0 +1,767 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package language + +// NumCompactTags is the number of common tags. The maximum tag is +// NumCompactTags-1. 
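// Editorial sketch, not part of the diff: typical use of NewCoverage from
// coverage.go above, as seen from within package language. The tag list and
// variable name are illustrative only.
var sketchSupported = NewCoverage([]Tag{
	MustParse("de"), MustParse("en"), MustParse("en-GB"), MustParse("fr"),
})
// sketchSupported.BaseLanguages() is derived from the tags (en and en-GB share
// the base "en", so it is de-duplicated), while Scripts() and Regions() return
// nil because no slice or constructor func was supplied for them.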
+const NumCompactTags = 752 + +var specialTags = []Tag{ // 2 elements + 0: {lang: 0xd5, region: 0x6d, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"}, + 1: {lang: 0x134, region: 0x134, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"}, +} // Size: 72 bytes + +var coreTags = map[uint32]uint16{ + 0x0: 0, // und + 0x01500000: 3, // af + 0x015000d1: 4, // af-NA + 0x01500160: 5, // af-ZA + 0x01b00000: 6, // agq + 0x01b00051: 7, // agq-CM + 0x02000000: 8, // ak + 0x0200007f: 9, // ak-GH + 0x02600000: 10, // am + 0x0260006e: 11, // am-ET + 0x03900000: 12, // ar + 0x03900001: 13, // ar-001 + 0x03900022: 14, // ar-AE + 0x03900038: 15, // ar-BH + 0x03900061: 16, // ar-DJ + 0x03900066: 17, // ar-DZ + 0x0390006a: 18, // ar-EG + 0x0390006b: 19, // ar-EH + 0x0390006c: 20, // ar-ER + 0x03900096: 21, // ar-IL + 0x0390009a: 22, // ar-IQ + 0x039000a0: 23, // ar-JO + 0x039000a7: 24, // ar-KM + 0x039000ab: 25, // ar-KW + 0x039000af: 26, // ar-LB + 0x039000b8: 27, // ar-LY + 0x039000b9: 28, // ar-MA + 0x039000c8: 29, // ar-MR + 0x039000e0: 30, // ar-OM + 0x039000ec: 31, // ar-PS + 0x039000f2: 32, // ar-QA + 0x03900107: 33, // ar-SA + 0x0390010a: 34, // ar-SD + 0x03900114: 35, // ar-SO + 0x03900116: 36, // ar-SS + 0x0390011b: 37, // ar-SY + 0x0390011f: 38, // ar-TD + 0x03900127: 39, // ar-TN + 0x0390015d: 40, // ar-YE + 0x03f00000: 41, // ars + 0x04200000: 42, // as + 0x04200098: 43, // as-IN + 0x04300000: 44, // asa + 0x0430012e: 45, // asa-TZ + 0x04700000: 46, // ast + 0x0470006d: 47, // ast-ES + 0x05700000: 48, // az + 0x0571e000: 49, // az-Cyrl + 0x0571e031: 50, // az-Cyrl-AZ + 0x05752000: 51, // az-Latn + 0x05752031: 52, // az-Latn-AZ + 0x05d00000: 53, // bas + 0x05d00051: 54, // bas-CM + 0x07000000: 55, // be + 0x07000046: 56, // be-BY + 0x07400000: 57, // bem + 0x07400161: 58, // bem-ZM + 0x07800000: 59, // bez + 0x0780012e: 60, // bez-TZ + 0x07d00000: 61, // bg + 0x07d00037: 62, // bg-BG + 0x08100000: 63, // bh + 0x09e00000: 64, // bm + 0x09e000c2: 65, // bm-ML + 0x0a300000: 66, // bn + 0x0a300034: 67, // bn-BD + 0x0a300098: 68, // bn-IN + 0x0a700000: 69, // bo + 0x0a700052: 70, // bo-CN + 0x0a700098: 71, // bo-IN + 0x0b000000: 72, // br + 0x0b000077: 73, // br-FR + 0x0b300000: 74, // brx + 0x0b300098: 75, // brx-IN + 0x0b500000: 76, // bs + 0x0b51e000: 77, // bs-Cyrl + 0x0b51e032: 78, // bs-Cyrl-BA + 0x0b552000: 79, // bs-Latn + 0x0b552032: 80, // bs-Latn-BA + 0x0d500000: 81, // ca + 0x0d500021: 82, // ca-AD + 0x0d50006d: 83, // ca-ES + 0x0d500077: 84, // ca-FR + 0x0d50009d: 85, // ca-IT + 0x0da00000: 86, // ce + 0x0da00105: 87, // ce-RU + 0x0dd00000: 88, // cgg + 0x0dd00130: 89, // cgg-UG + 0x0e300000: 90, // chr + 0x0e300134: 91, // chr-US + 0x0e700000: 92, // ckb + 0x0e70009a: 93, // ckb-IQ + 0x0e70009b: 94, // ckb-IR + 0x0f600000: 95, // cs + 0x0f60005d: 96, // cs-CZ + 0x0fa00000: 97, // cu + 0x0fa00105: 98, // cu-RU + 0x0fc00000: 99, // cy + 0x0fc0007a: 100, // cy-GB + 0x0fd00000: 101, // da + 0x0fd00062: 102, // da-DK + 0x0fd00081: 103, // da-GL + 0x10400000: 104, // dav + 0x104000a3: 105, // dav-KE + 0x10900000: 106, // de + 0x1090002d: 107, // de-AT + 0x10900035: 108, // de-BE + 0x1090004d: 109, // de-CH + 0x1090005f: 110, // de-DE + 0x1090009d: 111, // de-IT + 0x109000b1: 112, // de-LI + 0x109000b6: 113, // de-LU + 0x11300000: 114, // dje + 0x113000d3: 115, // dje-NE + 0x11b00000: 116, // dsb + 0x11b0005f: 117, // dsb-DE + 0x12000000: 118, // dua + 0x12000051: 119, // dua-CM + 0x12400000: 120, // dv + 0x12700000: 121, // dyo + 0x12700113: 122, // dyo-SN + 
0x12900000: 123, // dz + 0x12900042: 124, // dz-BT + 0x12b00000: 125, // ebu + 0x12b000a3: 126, // ebu-KE + 0x12c00000: 127, // ee + 0x12c0007f: 128, // ee-GH + 0x12c00121: 129, // ee-TG + 0x13100000: 130, // el + 0x1310005c: 131, // el-CY + 0x13100086: 132, // el-GR + 0x13400000: 133, // en + 0x13400001: 134, // en-001 + 0x1340001a: 135, // en-150 + 0x13400024: 136, // en-AG + 0x13400025: 137, // en-AI + 0x1340002c: 138, // en-AS + 0x1340002d: 139, // en-AT + 0x1340002e: 140, // en-AU + 0x13400033: 141, // en-BB + 0x13400035: 142, // en-BE + 0x13400039: 143, // en-BI + 0x1340003c: 144, // en-BM + 0x13400041: 145, // en-BS + 0x13400045: 146, // en-BW + 0x13400047: 147, // en-BZ + 0x13400048: 148, // en-CA + 0x13400049: 149, // en-CC + 0x1340004d: 150, // en-CH + 0x1340004f: 151, // en-CK + 0x13400051: 152, // en-CM + 0x1340005b: 153, // en-CX + 0x1340005c: 154, // en-CY + 0x1340005f: 155, // en-DE + 0x13400060: 156, // en-DG + 0x13400062: 157, // en-DK + 0x13400063: 158, // en-DM + 0x1340006c: 159, // en-ER + 0x13400071: 160, // en-FI + 0x13400072: 161, // en-FJ + 0x13400073: 162, // en-FK + 0x13400074: 163, // en-FM + 0x1340007a: 164, // en-GB + 0x1340007b: 165, // en-GD + 0x1340007e: 166, // en-GG + 0x1340007f: 167, // en-GH + 0x13400080: 168, // en-GI + 0x13400082: 169, // en-GM + 0x13400089: 170, // en-GU + 0x1340008b: 171, // en-GY + 0x1340008c: 172, // en-HK + 0x13400095: 173, // en-IE + 0x13400096: 174, // en-IL + 0x13400097: 175, // en-IM + 0x13400098: 176, // en-IN + 0x13400099: 177, // en-IO + 0x1340009e: 178, // en-JE + 0x1340009f: 179, // en-JM + 0x134000a3: 180, // en-KE + 0x134000a6: 181, // en-KI + 0x134000a8: 182, // en-KN + 0x134000ac: 183, // en-KY + 0x134000b0: 184, // en-LC + 0x134000b3: 185, // en-LR + 0x134000b4: 186, // en-LS + 0x134000be: 187, // en-MG + 0x134000bf: 188, // en-MH + 0x134000c5: 189, // en-MO + 0x134000c6: 190, // en-MP + 0x134000c9: 191, // en-MS + 0x134000ca: 192, // en-MT + 0x134000cb: 193, // en-MU + 0x134000cd: 194, // en-MW + 0x134000cf: 195, // en-MY + 0x134000d1: 196, // en-NA + 0x134000d4: 197, // en-NF + 0x134000d5: 198, // en-NG + 0x134000d8: 199, // en-NL + 0x134000dc: 200, // en-NR + 0x134000de: 201, // en-NU + 0x134000df: 202, // en-NZ + 0x134000e5: 203, // en-PG + 0x134000e6: 204, // en-PH + 0x134000e7: 205, // en-PK + 0x134000ea: 206, // en-PN + 0x134000eb: 207, // en-PR + 0x134000ef: 208, // en-PW + 0x13400106: 209, // en-RW + 0x13400108: 210, // en-SB + 0x13400109: 211, // en-SC + 0x1340010a: 212, // en-SD + 0x1340010b: 213, // en-SE + 0x1340010c: 214, // en-SG + 0x1340010d: 215, // en-SH + 0x1340010e: 216, // en-SI + 0x13400111: 217, // en-SL + 0x13400116: 218, // en-SS + 0x1340011a: 219, // en-SX + 0x1340011c: 220, // en-SZ + 0x1340011e: 221, // en-TC + 0x13400124: 222, // en-TK + 0x13400128: 223, // en-TO + 0x1340012b: 224, // en-TT + 0x1340012c: 225, // en-TV + 0x1340012e: 226, // en-TZ + 0x13400130: 227, // en-UG + 0x13400132: 228, // en-UM + 0x13400134: 229, // en-US + 0x13400138: 230, // en-VC + 0x1340013b: 231, // en-VG + 0x1340013c: 232, // en-VI + 0x1340013e: 233, // en-VU + 0x13400141: 234, // en-WS + 0x13400160: 235, // en-ZA + 0x13400161: 236, // en-ZM + 0x13400163: 237, // en-ZW + 0x13700000: 238, // eo + 0x13700001: 239, // eo-001 + 0x13900000: 240, // es + 0x1390001e: 241, // es-419 + 0x1390002b: 242, // es-AR + 0x1390003e: 243, // es-BO + 0x13900040: 244, // es-BR + 0x13900050: 245, // es-CL + 0x13900053: 246, // es-CO + 0x13900055: 247, // es-CR + 0x13900058: 248, // es-CU + 0x13900064: 249, // es-DO + 0x13900067: 
250, // es-EA + 0x13900068: 251, // es-EC + 0x1390006d: 252, // es-ES + 0x13900085: 253, // es-GQ + 0x13900088: 254, // es-GT + 0x1390008e: 255, // es-HN + 0x13900093: 256, // es-IC + 0x139000ce: 257, // es-MX + 0x139000d7: 258, // es-NI + 0x139000e1: 259, // es-PA + 0x139000e3: 260, // es-PE + 0x139000e6: 261, // es-PH + 0x139000eb: 262, // es-PR + 0x139000f0: 263, // es-PY + 0x13900119: 264, // es-SV + 0x13900134: 265, // es-US + 0x13900135: 266, // es-UY + 0x1390013a: 267, // es-VE + 0x13b00000: 268, // et + 0x13b00069: 269, // et-EE + 0x14000000: 270, // eu + 0x1400006d: 271, // eu-ES + 0x14100000: 272, // ewo + 0x14100051: 273, // ewo-CM + 0x14300000: 274, // fa + 0x14300023: 275, // fa-AF + 0x1430009b: 276, // fa-IR + 0x14900000: 277, // ff + 0x14900051: 278, // ff-CM + 0x14900083: 279, // ff-GN + 0x149000c8: 280, // ff-MR + 0x14900113: 281, // ff-SN + 0x14c00000: 282, // fi + 0x14c00071: 283, // fi-FI + 0x14e00000: 284, // fil + 0x14e000e6: 285, // fil-PH + 0x15300000: 286, // fo + 0x15300062: 287, // fo-DK + 0x15300075: 288, // fo-FO + 0x15900000: 289, // fr + 0x15900035: 290, // fr-BE + 0x15900036: 291, // fr-BF + 0x15900039: 292, // fr-BI + 0x1590003a: 293, // fr-BJ + 0x1590003b: 294, // fr-BL + 0x15900048: 295, // fr-CA + 0x1590004a: 296, // fr-CD + 0x1590004b: 297, // fr-CF + 0x1590004c: 298, // fr-CG + 0x1590004d: 299, // fr-CH + 0x1590004e: 300, // fr-CI + 0x15900051: 301, // fr-CM + 0x15900061: 302, // fr-DJ + 0x15900066: 303, // fr-DZ + 0x15900077: 304, // fr-FR + 0x15900079: 305, // fr-GA + 0x1590007d: 306, // fr-GF + 0x15900083: 307, // fr-GN + 0x15900084: 308, // fr-GP + 0x15900085: 309, // fr-GQ + 0x15900090: 310, // fr-HT + 0x159000a7: 311, // fr-KM + 0x159000b6: 312, // fr-LU + 0x159000b9: 313, // fr-MA + 0x159000ba: 314, // fr-MC + 0x159000bd: 315, // fr-MF + 0x159000be: 316, // fr-MG + 0x159000c2: 317, // fr-ML + 0x159000c7: 318, // fr-MQ + 0x159000c8: 319, // fr-MR + 0x159000cb: 320, // fr-MU + 0x159000d2: 321, // fr-NC + 0x159000d3: 322, // fr-NE + 0x159000e4: 323, // fr-PF + 0x159000e9: 324, // fr-PM + 0x15900101: 325, // fr-RE + 0x15900106: 326, // fr-RW + 0x15900109: 327, // fr-SC + 0x15900113: 328, // fr-SN + 0x1590011b: 329, // fr-SY + 0x1590011f: 330, // fr-TD + 0x15900121: 331, // fr-TG + 0x15900127: 332, // fr-TN + 0x1590013e: 333, // fr-VU + 0x1590013f: 334, // fr-WF + 0x1590015e: 335, // fr-YT + 0x16400000: 336, // fur + 0x1640009d: 337, // fur-IT + 0x16800000: 338, // fy + 0x168000d8: 339, // fy-NL + 0x16900000: 340, // ga + 0x16900095: 341, // ga-IE + 0x17800000: 342, // gd + 0x1780007a: 343, // gd-GB + 0x18a00000: 344, // gl + 0x18a0006d: 345, // gl-ES + 0x19c00000: 346, // gsw + 0x19c0004d: 347, // gsw-CH + 0x19c00077: 348, // gsw-FR + 0x19c000b1: 349, // gsw-LI + 0x19d00000: 350, // gu + 0x19d00098: 351, // gu-IN + 0x1a200000: 352, // guw + 0x1a400000: 353, // guz + 0x1a4000a3: 354, // guz-KE + 0x1a500000: 355, // gv + 0x1a500097: 356, // gv-IM + 0x1ad00000: 357, // ha + 0x1ad0007f: 358, // ha-GH + 0x1ad000d3: 359, // ha-NE + 0x1ad000d5: 360, // ha-NG + 0x1b100000: 361, // haw + 0x1b100134: 362, // haw-US + 0x1b500000: 363, // he + 0x1b500096: 364, // he-IL + 0x1b700000: 365, // hi + 0x1b700098: 366, // hi-IN + 0x1ca00000: 367, // hr + 0x1ca00032: 368, // hr-BA + 0x1ca0008f: 369, // hr-HR + 0x1cb00000: 370, // hsb + 0x1cb0005f: 371, // hsb-DE + 0x1ce00000: 372, // hu + 0x1ce00091: 373, // hu-HU + 0x1d000000: 374, // hy + 0x1d000027: 375, // hy-AM + 0x1da00000: 376, // id + 0x1da00094: 377, // id-ID + 0x1df00000: 378, // ig + 0x1df000d5: 379, // 
ig-NG + 0x1e200000: 380, // ii + 0x1e200052: 381, // ii-CN + 0x1f000000: 382, // is + 0x1f00009c: 383, // is-IS + 0x1f100000: 384, // it + 0x1f10004d: 385, // it-CH + 0x1f10009d: 386, // it-IT + 0x1f100112: 387, // it-SM + 0x1f200000: 388, // iu + 0x1f800000: 389, // ja + 0x1f8000a1: 390, // ja-JP + 0x1fb00000: 391, // jbo + 0x1ff00000: 392, // jgo + 0x1ff00051: 393, // jgo-CM + 0x20200000: 394, // jmc + 0x2020012e: 395, // jmc-TZ + 0x20600000: 396, // jv + 0x20800000: 397, // ka + 0x2080007c: 398, // ka-GE + 0x20a00000: 399, // kab + 0x20a00066: 400, // kab-DZ + 0x20e00000: 401, // kaj + 0x20f00000: 402, // kam + 0x20f000a3: 403, // kam-KE + 0x21700000: 404, // kcg + 0x21b00000: 405, // kde + 0x21b0012e: 406, // kde-TZ + 0x21f00000: 407, // kea + 0x21f00059: 408, // kea-CV + 0x22c00000: 409, // khq + 0x22c000c2: 410, // khq-ML + 0x23100000: 411, // ki + 0x231000a3: 412, // ki-KE + 0x23a00000: 413, // kk + 0x23a000ad: 414, // kk-KZ + 0x23c00000: 415, // kkj + 0x23c00051: 416, // kkj-CM + 0x23d00000: 417, // kl + 0x23d00081: 418, // kl-GL + 0x23e00000: 419, // kln + 0x23e000a3: 420, // kln-KE + 0x24200000: 421, // km + 0x242000a5: 422, // km-KH + 0x24900000: 423, // kn + 0x24900098: 424, // kn-IN + 0x24b00000: 425, // ko + 0x24b000a9: 426, // ko-KP + 0x24b000aa: 427, // ko-KR + 0x24d00000: 428, // kok + 0x24d00098: 429, // kok-IN + 0x26100000: 430, // ks + 0x26100098: 431, // ks-IN + 0x26200000: 432, // ksb + 0x2620012e: 433, // ksb-TZ + 0x26400000: 434, // ksf + 0x26400051: 435, // ksf-CM + 0x26500000: 436, // ksh + 0x2650005f: 437, // ksh-DE + 0x26b00000: 438, // ku + 0x27800000: 439, // kw + 0x2780007a: 440, // kw-GB + 0x28100000: 441, // ky + 0x281000a4: 442, // ky-KG + 0x28800000: 443, // lag + 0x2880012e: 444, // lag-TZ + 0x28c00000: 445, // lb + 0x28c000b6: 446, // lb-LU + 0x29a00000: 447, // lg + 0x29a00130: 448, // lg-UG + 0x2a600000: 449, // lkt + 0x2a600134: 450, // lkt-US + 0x2ac00000: 451, // ln + 0x2ac00029: 452, // ln-AO + 0x2ac0004a: 453, // ln-CD + 0x2ac0004b: 454, // ln-CF + 0x2ac0004c: 455, // ln-CG + 0x2af00000: 456, // lo + 0x2af000ae: 457, // lo-LA + 0x2b600000: 458, // lrc + 0x2b60009a: 459, // lrc-IQ + 0x2b60009b: 460, // lrc-IR + 0x2b700000: 461, // lt + 0x2b7000b5: 462, // lt-LT + 0x2b900000: 463, // lu + 0x2b90004a: 464, // lu-CD + 0x2bb00000: 465, // luo + 0x2bb000a3: 466, // luo-KE + 0x2bc00000: 467, // luy + 0x2bc000a3: 468, // luy-KE + 0x2be00000: 469, // lv + 0x2be000b7: 470, // lv-LV + 0x2c800000: 471, // mas + 0x2c8000a3: 472, // mas-KE + 0x2c80012e: 473, // mas-TZ + 0x2e000000: 474, // mer + 0x2e0000a3: 475, // mer-KE + 0x2e400000: 476, // mfe + 0x2e4000cb: 477, // mfe-MU + 0x2e800000: 478, // mg + 0x2e8000be: 479, // mg-MG + 0x2e900000: 480, // mgh + 0x2e9000d0: 481, // mgh-MZ + 0x2eb00000: 482, // mgo + 0x2eb00051: 483, // mgo-CM + 0x2f600000: 484, // mk + 0x2f6000c1: 485, // mk-MK + 0x2fb00000: 486, // ml + 0x2fb00098: 487, // ml-IN + 0x30200000: 488, // mn + 0x302000c4: 489, // mn-MN + 0x31200000: 490, // mr + 0x31200098: 491, // mr-IN + 0x31600000: 492, // ms + 0x3160003d: 493, // ms-BN + 0x316000cf: 494, // ms-MY + 0x3160010c: 495, // ms-SG + 0x31700000: 496, // mt + 0x317000ca: 497, // mt-MT + 0x31c00000: 498, // mua + 0x31c00051: 499, // mua-CM + 0x32800000: 500, // my + 0x328000c3: 501, // my-MM + 0x33100000: 502, // mzn + 0x3310009b: 503, // mzn-IR + 0x33800000: 504, // nah + 0x33c00000: 505, // naq + 0x33c000d1: 506, // naq-NA + 0x33e00000: 507, // nb + 0x33e000d9: 508, // nb-NO + 0x33e0010f: 509, // nb-SJ + 0x34500000: 510, // nd + 0x34500163: 
511, // nd-ZW + 0x34700000: 512, // nds + 0x3470005f: 513, // nds-DE + 0x347000d8: 514, // nds-NL + 0x34800000: 515, // ne + 0x34800098: 516, // ne-IN + 0x348000da: 517, // ne-NP + 0x35e00000: 518, // nl + 0x35e0002f: 519, // nl-AW + 0x35e00035: 520, // nl-BE + 0x35e0003f: 521, // nl-BQ + 0x35e0005a: 522, // nl-CW + 0x35e000d8: 523, // nl-NL + 0x35e00115: 524, // nl-SR + 0x35e0011a: 525, // nl-SX + 0x35f00000: 526, // nmg + 0x35f00051: 527, // nmg-CM + 0x36100000: 528, // nn + 0x361000d9: 529, // nn-NO + 0x36300000: 530, // nnh + 0x36300051: 531, // nnh-CM + 0x36600000: 532, // no + 0x36c00000: 533, // nqo + 0x36d00000: 534, // nr + 0x37100000: 535, // nso + 0x37700000: 536, // nus + 0x37700116: 537, // nus-SS + 0x37e00000: 538, // ny + 0x38000000: 539, // nyn + 0x38000130: 540, // nyn-UG + 0x38700000: 541, // om + 0x3870006e: 542, // om-ET + 0x387000a3: 543, // om-KE + 0x38c00000: 544, // or + 0x38c00098: 545, // or-IN + 0x38f00000: 546, // os + 0x38f0007c: 547, // os-GE + 0x38f00105: 548, // os-RU + 0x39400000: 549, // pa + 0x39405000: 550, // pa-Arab + 0x394050e7: 551, // pa-Arab-PK + 0x3942f000: 552, // pa-Guru + 0x3942f098: 553, // pa-Guru-IN + 0x39800000: 554, // pap + 0x3aa00000: 555, // pl + 0x3aa000e8: 556, // pl-PL + 0x3b400000: 557, // prg + 0x3b400001: 558, // prg-001 + 0x3b500000: 559, // ps + 0x3b500023: 560, // ps-AF + 0x3b700000: 561, // pt + 0x3b700029: 562, // pt-AO + 0x3b700040: 563, // pt-BR + 0x3b70004d: 564, // pt-CH + 0x3b700059: 565, // pt-CV + 0x3b700085: 566, // pt-GQ + 0x3b70008a: 567, // pt-GW + 0x3b7000b6: 568, // pt-LU + 0x3b7000c5: 569, // pt-MO + 0x3b7000d0: 570, // pt-MZ + 0x3b7000ed: 571, // pt-PT + 0x3b700117: 572, // pt-ST + 0x3b700125: 573, // pt-TL + 0x3bb00000: 574, // qu + 0x3bb0003e: 575, // qu-BO + 0x3bb00068: 576, // qu-EC + 0x3bb000e3: 577, // qu-PE + 0x3cb00000: 578, // rm + 0x3cb0004d: 579, // rm-CH + 0x3d000000: 580, // rn + 0x3d000039: 581, // rn-BI + 0x3d300000: 582, // ro + 0x3d3000bb: 583, // ro-MD + 0x3d300103: 584, // ro-RO + 0x3d500000: 585, // rof + 0x3d50012e: 586, // rof-TZ + 0x3d900000: 587, // ru + 0x3d900046: 588, // ru-BY + 0x3d9000a4: 589, // ru-KG + 0x3d9000ad: 590, // ru-KZ + 0x3d9000bb: 591, // ru-MD + 0x3d900105: 592, // ru-RU + 0x3d90012f: 593, // ru-UA + 0x3dc00000: 594, // rw + 0x3dc00106: 595, // rw-RW + 0x3dd00000: 596, // rwk + 0x3dd0012e: 597, // rwk-TZ + 0x3e200000: 598, // sah + 0x3e200105: 599, // sah-RU + 0x3e300000: 600, // saq + 0x3e3000a3: 601, // saq-KE + 0x3e900000: 602, // sbp + 0x3e90012e: 603, // sbp-TZ + 0x3f200000: 604, // sdh + 0x3f300000: 605, // se + 0x3f300071: 606, // se-FI + 0x3f3000d9: 607, // se-NO + 0x3f30010b: 608, // se-SE + 0x3f500000: 609, // seh + 0x3f5000d0: 610, // seh-MZ + 0x3f700000: 611, // ses + 0x3f7000c2: 612, // ses-ML + 0x3f800000: 613, // sg + 0x3f80004b: 614, // sg-CF + 0x3fe00000: 615, // shi + 0x3fe52000: 616, // shi-Latn + 0x3fe520b9: 617, // shi-Latn-MA + 0x3fed2000: 618, // shi-Tfng + 0x3fed20b9: 619, // shi-Tfng-MA + 0x40200000: 620, // si + 0x402000b2: 621, // si-LK + 0x40800000: 622, // sk + 0x40800110: 623, // sk-SK + 0x40c00000: 624, // sl + 0x40c0010e: 625, // sl-SI + 0x41200000: 626, // sma + 0x41300000: 627, // smi + 0x41400000: 628, // smj + 0x41500000: 629, // smn + 0x41500071: 630, // smn-FI + 0x41800000: 631, // sms + 0x41900000: 632, // sn + 0x41900163: 633, // sn-ZW + 0x41f00000: 634, // so + 0x41f00061: 635, // so-DJ + 0x41f0006e: 636, // so-ET + 0x41f000a3: 637, // so-KE + 0x41f00114: 638, // so-SO + 0x42700000: 639, // sq + 0x42700026: 640, // sq-AL + 
0x427000c1: 641, // sq-MK + 0x4270014c: 642, // sq-XK + 0x42800000: 643, // sr + 0x4281e000: 644, // sr-Cyrl + 0x4281e032: 645, // sr-Cyrl-BA + 0x4281e0bc: 646, // sr-Cyrl-ME + 0x4281e104: 647, // sr-Cyrl-RS + 0x4281e14c: 648, // sr-Cyrl-XK + 0x42852000: 649, // sr-Latn + 0x42852032: 650, // sr-Latn-BA + 0x428520bc: 651, // sr-Latn-ME + 0x42852104: 652, // sr-Latn-RS + 0x4285214c: 653, // sr-Latn-XK + 0x42d00000: 654, // ss + 0x43000000: 655, // ssy + 0x43100000: 656, // st + 0x43a00000: 657, // sv + 0x43a00030: 658, // sv-AX + 0x43a00071: 659, // sv-FI + 0x43a0010b: 660, // sv-SE + 0x43b00000: 661, // sw + 0x43b0004a: 662, // sw-CD + 0x43b000a3: 663, // sw-KE + 0x43b0012e: 664, // sw-TZ + 0x43b00130: 665, // sw-UG + 0x44400000: 666, // syr + 0x44600000: 667, // ta + 0x44600098: 668, // ta-IN + 0x446000b2: 669, // ta-LK + 0x446000cf: 670, // ta-MY + 0x4460010c: 671, // ta-SG + 0x45700000: 672, // te + 0x45700098: 673, // te-IN + 0x45a00000: 674, // teo + 0x45a000a3: 675, // teo-KE + 0x45a00130: 676, // teo-UG + 0x46100000: 677, // th + 0x46100122: 678, // th-TH + 0x46500000: 679, // ti + 0x4650006c: 680, // ti-ER + 0x4650006e: 681, // ti-ET + 0x46700000: 682, // tig + 0x46c00000: 683, // tk + 0x46c00126: 684, // tk-TM + 0x47600000: 685, // tn + 0x47800000: 686, // to + 0x47800128: 687, // to-TO + 0x48000000: 688, // tr + 0x4800005c: 689, // tr-CY + 0x4800012a: 690, // tr-TR + 0x48400000: 691, // ts + 0x49a00000: 692, // twq + 0x49a000d3: 693, // twq-NE + 0x49f00000: 694, // tzm + 0x49f000b9: 695, // tzm-MA + 0x4a200000: 696, // ug + 0x4a200052: 697, // ug-CN + 0x4a400000: 698, // uk + 0x4a40012f: 699, // uk-UA + 0x4aa00000: 700, // ur + 0x4aa00098: 701, // ur-IN + 0x4aa000e7: 702, // ur-PK + 0x4b200000: 703, // uz + 0x4b205000: 704, // uz-Arab + 0x4b205023: 705, // uz-Arab-AF + 0x4b21e000: 706, // uz-Cyrl + 0x4b21e136: 707, // uz-Cyrl-UZ + 0x4b252000: 708, // uz-Latn + 0x4b252136: 709, // uz-Latn-UZ + 0x4b400000: 710, // vai + 0x4b452000: 711, // vai-Latn + 0x4b4520b3: 712, // vai-Latn-LR + 0x4b4d9000: 713, // vai-Vaii + 0x4b4d90b3: 714, // vai-Vaii-LR + 0x4b600000: 715, // ve + 0x4b900000: 716, // vi + 0x4b90013d: 717, // vi-VN + 0x4bf00000: 718, // vo + 0x4bf00001: 719, // vo-001 + 0x4c200000: 720, // vun + 0x4c20012e: 721, // vun-TZ + 0x4c400000: 722, // wa + 0x4c500000: 723, // wae + 0x4c50004d: 724, // wae-CH + 0x4db00000: 725, // wo + 0x4e800000: 726, // xh + 0x4f100000: 727, // xog + 0x4f100130: 728, // xog-UG + 0x4ff00000: 729, // yav + 0x4ff00051: 730, // yav-CM + 0x50800000: 731, // yi + 0x50800001: 732, // yi-001 + 0x50e00000: 733, // yo + 0x50e0003a: 734, // yo-BJ + 0x50e000d5: 735, // yo-NG + 0x51500000: 736, // yue + 0x5150008c: 737, // yue-HK + 0x51e00000: 738, // zgh + 0x51e000b9: 739, // zgh-MA + 0x51f00000: 740, // zh + 0x51f34000: 741, // zh-Hans + 0x51f34052: 742, // zh-Hans-CN + 0x51f3408c: 743, // zh-Hans-HK + 0x51f340c5: 744, // zh-Hans-MO + 0x51f3410c: 745, // zh-Hans-SG + 0x51f35000: 746, // zh-Hant + 0x51f3508c: 747, // zh-Hant-HK + 0x51f350c5: 748, // zh-Hant-MO + 0x51f3512d: 749, // zh-Hant-TW + 0x52400000: 750, // zu + 0x52400160: 751, // zu-ZA +} + +// Total table size 4580 bytes (4KiB); checksum: A7F72A2A diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go new file mode 100644 index 000000000..5eecceb61 --- /dev/null +++ b/vendor/golang.org/x/text/language/language.go @@ -0,0 +1,975 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run maketables.go gen_common.go -output tables.go +//go:generate go run gen_index.go + +// Package language implements BCP 47 language tags and related functionality. +// +// The Tag type, which is used to represent languages, is agnostic to the +// meaning of its subtags. Tags are not fully canonicalized to preserve +// information that may be valuable in certain contexts. As a consequence, two +// different tags may represent identical languages. +// +// Initializing language- or locale-specific components usually consists of +// two steps. The first step is to select a display language based on the +// preferred languages of the user and the languages supported by an application. +// The second step is to create the language-specific services based on +// this selection. Each is discussed in more details below. +// +// Matching preferred against supported languages +// +// An application may support various languages. This list is typically limited +// by the languages for which there exists translations of the user interface. +// Similarly, a user may provide a list of preferred languages which is limited +// by the languages understood by this user. +// An application should use a Matcher to find the best supported language based +// on the user's preferred list. +// Matchers are aware of the intricacies of equivalence between languages. +// The default Matcher implementation takes into account things such as +// deprecated subtags, legacy tags, and mutual intelligibility between scripts +// and languages. +// +// A Matcher for English, Australian English, Danish, and standard Mandarin can +// be defined as follows: +// +// var matcher = language.NewMatcher([]language.Tag{ +// language.English, // The first language is used as fallback. +// language.MustParse("en-AU"), +// language.Danish, +// language.Chinese, +// }) +// +// The following code selects the best match for someone speaking Spanish and +// Norwegian: +// +// preferred := []language.Tag{ language.Spanish, language.Norwegian } +// tag, _, _ := matcher.Match(preferred...) +// +// In this case, the best match is Danish, as Danish is sufficiently a match to +// Norwegian to not have to fall back to the default. +// See ParseAcceptLanguage on how to handle the Accept-Language HTTP header. +// +// Selecting language-specific services +// +// One should always use the Tag returned by the Matcher to create an instance +// of any of the language-specific services provided by the text repository. +// This prevents the mixing of languages, such as having a different language for +// messages and display names, as well as improper casing or sorting order for +// the selected language. +// Using the returned Tag also allows user-defined settings, such as collation +// order or numbering system to be transparently passed as options. +// +// If you have language-specific data in your application, however, it will in +// most cases suffice to use the index returned by the matcher to identify +// the user language. +// The following loop provides an alternative in case this is not sufficient: +// +// supported := map[language.Tag]data{ +// language.English: enData, +// language.MustParse("en-AU"): enAUData, +// language.Danish: daData, +// language.Chinese: zhData, +// } +// tag, _, _ := matcher.Match(preferred...) 
+// for ; tag != language.Und; tag = tag.Parent() { +// if v, ok := supported[tag]; ok { +// return v +// } +// } +// return enData // should not reach here +// +// Repeatedly taking the Parent of the tag returned by Match will eventually +// match one of the tags used to initialize the Matcher. +// +// Canonicalization +// +// By default, only legacy and deprecated tags are converted into their +// canonical equivalent. All other information is preserved. This approach makes +// the confidence scores more accurate and allows matchers to distinguish +// between variants that are otherwise lost. +// +// As a consequence, two tags that should be treated as identical according to +// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The +// Matchers will handle such distinctions, though, and are aware of the +// equivalence relations. The CanonType type can be used to alter the +// canonicalization form. +// +// References +// +// BCP 47 - Tags for Identifying Languages +// http://tools.ietf.org/html/bcp47 +package language // import "golang.org/x/text/language" + +// TODO: Remove above NOTE after: +// - verifying that tables are dropped correctly (most notably matcher tables). + +import ( + "errors" + "fmt" + "strings" +) + +const ( + // maxCoreSize is the maximum size of a BCP 47 tag without variants and + // extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes. + maxCoreSize = 12 + + // max99thPercentileSize is a somewhat arbitrary buffer size that presumably + // is large enough to hold at least 99% of the BCP 47 tags. + max99thPercentileSize = 32 + + // maxSimpleUExtensionSize is the maximum size of a -u extension with one + // key-type pair. Equals len("-u-") + key (2) + dash + max value (8). + maxSimpleUExtensionSize = 14 +) + +// Tag represents a BCP 47 language tag. It is used to specify an instance of a +// specific language or locale. All language tag values are guaranteed to be +// well-formed. +type Tag struct { + lang langID + region regionID + script scriptID + pVariant byte // offset in str, includes preceding '-' + pExt uint16 // offset of first extension, includes preceding '-' + + // str is the string representation of the Tag. It will only be used if the + // tag has variants or extensions. + str string +} + +// Make is a convenience wrapper for Parse that omits the error. +// In case of an error, a sensible default is returned. +func Make(s string) Tag { + return Default.Make(s) +} + +// Make is a convenience wrapper for c.Parse that omits the error. +// In case of an error, a sensible default is returned. +func (c CanonType) Make(s string) Tag { + t, _ := c.Parse(s) + return t +} + +// Raw returns the raw base language, script and region, without making an +// attempt to infer their values. +func (t Tag) Raw() (b Base, s Script, r Region) { + return Base{t.lang}, Script{t.script}, Region{t.region} +} + +// equalTags compares language, script and region subtags only. +func (t Tag) equalTags(a Tag) bool { + return t.lang == a.lang && t.script == a.script && t.region == a.region +} + +// IsRoot returns true if t is equal to language "und". +func (t Tag) IsRoot() bool { + if int(t.pVariant) < len(t.str) { + return false + } + return t.equalTags(und) +} + +// private reports whether the Tag consists solely of a private use tag. +func (t Tag) private() bool { + return t.str != "" && t.pVariant == 0 +} + +// CanonType can be used to enable or disable various types of canonicalization. 
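For reference, a minimal usage sketch of the canonicalization levels described above (not part of the vendored file; it assumes only the public API of golang.org/x/text/language as vendored here, and the tag "cmn" is an illustrative value taken from the Macro example in the comments below):

    package main

    import (
        "fmt"

        "golang.org/x/text/language"
    )

    func main() {
        // Parse without canonicalization so the macro-language member is kept as-is.
        raw := language.Raw.Make("cmn")

        // Default (Deprecated | Legacy) leaves "cmn" untouched; it is neither deprecated nor legacy.
        def, _ := language.Default.Canonicalize(raw)

        // All additionally applies Macro, which maps "cmn" to the macro language "zh".
        all, _ := language.All.Canonicalize(raw)

        fmt.Println(raw, def, all)
    }
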
+type CanonType int + +const ( + // Replace deprecated base languages with their preferred replacements. + DeprecatedBase CanonType = 1 << iota + // Replace deprecated scripts with their preferred replacements. + DeprecatedScript + // Replace deprecated regions with their preferred replacements. + DeprecatedRegion + // Remove redundant scripts. + SuppressScript + // Normalize legacy encodings. This includes legacy languages defined in + // CLDR as well as bibliographic codes defined in ISO-639. + Legacy + // Map the dominant language of a macro language group to the macro language + // subtag. For example cmn -> zh. + Macro + // The CLDR flag should be used if full compatibility with CLDR is required. + // There are a few cases where language.Tag may differ from CLDR. To follow all + // of CLDR's suggestions, use All|CLDR. + CLDR + + // Raw can be used to Compose or Parse without Canonicalization. + Raw CanonType = 0 + + // Replace all deprecated tags with their preferred replacements. + Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion + + // All canonicalizations recommended by BCP 47. + BCP47 = Deprecated | SuppressScript + + // All canonicalizations. + All = BCP47 | Legacy | Macro + + // Default is the canonicalization used by Parse, Make and Compose. To + // preserve as much information as possible, canonicalizations that remove + // potentially valuable information are not included. The Matcher is + // designed to recognize similar tags that would be the same if + // they were canonicalized using All. + Default = Deprecated | Legacy + + canonLang = DeprecatedBase | Legacy | Macro + + // TODO: LikelyScript, LikelyRegion: suppress similar to ICU. +) + +// canonicalize returns the canonicalized equivalent of the tag and +// whether there was any change. +func (t Tag) canonicalize(c CanonType) (Tag, bool) { + if c == Raw { + return t, false + } + changed := false + if c&SuppressScript != 0 { + if t.lang < langNoIndexOffset && uint8(t.script) == suppressScript[t.lang] { + t.script = 0 + changed = true + } + } + if c&canonLang != 0 { + for { + if l, aliasType := normLang(t.lang); l != t.lang { + switch aliasType { + case langLegacy: + if c&Legacy != 0 { + if t.lang == _sh && t.script == 0 { + t.script = _Latn + } + t.lang = l + changed = true + } + case langMacro: + if c&Macro != 0 { + // We deviate here from CLDR. The mapping "nb" -> "no" + // qualifies as a typical Macro language mapping. However, + // for legacy reasons, CLDR maps "no", the macro language + // code for Norwegian, to the dominant variant "nb". This + // change is currently under consideration for CLDR as well. + // See http://unicode.org/cldr/trac/ticket/2698 and also + // http://unicode.org/cldr/trac/ticket/1790 for some of the + // practical implications. TODO: this check could be removed + // if CLDR adopts this change. + if c&CLDR == 0 || t.lang != _nb { + changed = true + t.lang = l + } + } + case langDeprecated: + if c&DeprecatedBase != 0 { + if t.lang == _mo && t.region == 0 { + t.region = _MD + } + t.lang = l + changed = true + // Other canonicalization types may still apply. 
+ continue + } + } + } else if c&Legacy != 0 && t.lang == _no && c&CLDR != 0 { + t.lang = _nb + changed = true + } + break + } + } + if c&DeprecatedScript != 0 { + if t.script == _Qaai { + changed = true + t.script = _Zinh + } + } + if c&DeprecatedRegion != 0 { + if r := normRegion(t.region); r != 0 { + changed = true + t.region = r + } + } + return t, changed +} + +// Canonicalize returns the canonicalized equivalent of the tag. +func (c CanonType) Canonicalize(t Tag) (Tag, error) { + t, changed := t.canonicalize(c) + if changed { + t.remakeString() + } + return t, nil +} + +// Confidence indicates the level of certainty for a given return value. +// For example, Serbian may be written in Cyrillic or Latin script. +// The confidence level indicates whether a value was explicitly specified, +// whether it is typically the only possible value, or whether there is +// an ambiguity. +type Confidence int + +const ( + No Confidence = iota // full confidence that there was no match + Low // most likely value picked out of a set of alternatives + High // value is generally assumed to be the correct match + Exact // exact match or explicitly specified value +) + +var confName = []string{"No", "Low", "High", "Exact"} + +func (c Confidence) String() string { + return confName[c] +} + +// remakeString is used to update t.str in case lang, script or region changed. +// It is assumed that pExt and pVariant still point to the start of the +// respective parts. +func (t *Tag) remakeString() { + if t.str == "" { + return + } + extra := t.str[t.pVariant:] + if t.pVariant > 0 { + extra = extra[1:] + } + if t.equalTags(und) && strings.HasPrefix(extra, "x-") { + t.str = extra + t.pVariant = 0 + t.pExt = 0 + return + } + var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases. + b := buf[:t.genCoreBytes(buf[:])] + if extra != "" { + diff := len(b) - int(t.pVariant) + b = append(b, '-') + b = append(b, extra...) + t.pVariant = uint8(int(t.pVariant) + diff) + t.pExt = uint16(int(t.pExt) + diff) + } else { + t.pVariant = uint8(len(b)) + t.pExt = uint16(len(b)) + } + t.str = string(b) +} + +// genCoreBytes writes a string for the base languages, script and region tags +// to the given buffer and returns the number of bytes written. It will never +// write more than maxCoreSize bytes. +func (t *Tag) genCoreBytes(buf []byte) int { + n := t.lang.stringToBuf(buf[:]) + if t.script != 0 { + n += copy(buf[n:], "-") + n += copy(buf[n:], t.script.String()) + } + if t.region != 0 { + n += copy(buf[n:], "-") + n += copy(buf[n:], t.region.String()) + } + return n +} + +// String returns the canonical string representation of the language tag. +func (t Tag) String() string { + if t.str != "" { + return t.str + } + if t.script == 0 && t.region == 0 { + return t.lang.String() + } + buf := [maxCoreSize]byte{} + return string(buf[:t.genCoreBytes(buf[:])]) +} + +// Base returns the base language of the language tag. If the base language is +// unspecified, an attempt will be made to infer it from the context. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Base() (Base, Confidence) { + if t.lang != 0 { + return Base{t.lang}, Exact + } + c := High + if t.script == 0 && !(Region{t.region}).IsCountry() { + c = Low + } + if tag, err := addTags(t); err == nil && tag.lang != 0 { + return Base{tag.lang}, c + } + return Base{0}, No +} + +// Script infers the script for the language tag. 
If it was not explicitly given, it will infer +// a most likely candidate. +// If more than one script is commonly used for a language, the most likely one +// is returned with a low confidence indication. For example, it returns (Cyrl, Low) +// for Serbian. +// If a script cannot be inferred (Zzzz, No) is returned. We do not use Zyyy (undetermined) +// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks +// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts. +// See http://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for +// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified. +// Note that an inferred script is never guaranteed to be the correct one. Latin is +// almost exclusively used for Afrikaans, but Arabic has been used for some texts +// in the past. Also, the script that is commonly used may change over time. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Script() (Script, Confidence) { + if t.script != 0 { + return Script{t.script}, Exact + } + sc, c := scriptID(_Zzzz), No + if t.lang < langNoIndexOffset { + if scr := scriptID(suppressScript[t.lang]); scr != 0 { + // Note: it is not always the case that a language with a suppress + // script value is only written in one script (e.g. kk, ms, pa). + if t.region == 0 { + return Script{scriptID(scr)}, High + } + sc, c = scr, High + } + } + if tag, err := addTags(t); err == nil { + if tag.script != sc { + sc, c = tag.script, Low + } + } else { + t, _ = (Deprecated | Macro).Canonicalize(t) + if tag, err := addTags(t); err == nil && tag.script != sc { + sc, c = tag.script, Low + } + } + return Script{sc}, c +} + +// Region returns the region for the language tag. If it was not explicitly given, it will +// infer a most likely candidate from the context. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Region() (Region, Confidence) { + if t.region != 0 { + return Region{t.region}, Exact + } + if t, err := addTags(t); err == nil { + return Region{t.region}, Low // TODO: differentiate between high and low. + } + t, _ = (Deprecated | Macro).Canonicalize(t) + if tag, err := addTags(t); err == nil { + return Region{tag.region}, Low + } + return Region{_ZZ}, No // TODO: return world instead of undetermined? +} + +// Variant returns the variants specified explicitly for this language tag. +// or nil if no variant was specified. +func (t Tag) Variants() []Variant { + v := []Variant{} + if int(t.pVariant) < int(t.pExt) { + for x, str := "", t.str[t.pVariant:t.pExt]; str != ""; { + x, str = nextToken(str) + v = append(v, Variant{x}) + } + } + return v +} + +// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a +// specific language are substituted with fields from the parent language. +// The parent for a language may change for newer versions of CLDR. +func (t Tag) Parent() Tag { + if t.str != "" { + // Strip the variants and extensions. 
+ t, _ = Raw.Compose(t.Raw()) + if t.region == 0 && t.script != 0 && t.lang != 0 { + base, _ := addTags(Tag{lang: t.lang}) + if base.script == t.script { + return Tag{lang: t.lang} + } + } + return t + } + if t.lang != 0 { + if t.region != 0 { + maxScript := t.script + if maxScript == 0 { + max, _ := addTags(t) + maxScript = max.script + } + + for i := range parents { + if langID(parents[i].lang) == t.lang && scriptID(parents[i].maxScript) == maxScript { + for _, r := range parents[i].fromRegion { + if regionID(r) == t.region { + return Tag{ + lang: t.lang, + script: scriptID(parents[i].script), + region: regionID(parents[i].toRegion), + } + } + } + } + } + + // Strip the script if it is the default one. + base, _ := addTags(Tag{lang: t.lang}) + if base.script != maxScript { + return Tag{lang: t.lang, script: maxScript} + } + return Tag{lang: t.lang} + } else if t.script != 0 { + // The parent for an base-script pair with a non-default script is + // "und" instead of the base language. + base, _ := addTags(Tag{lang: t.lang}) + if base.script != t.script { + return und + } + return Tag{lang: t.lang} + } + } + return und +} + +// returns token t and the rest of the string. +func nextToken(s string) (t, tail string) { + p := strings.Index(s[1:], "-") + if p == -1 { + return s[1:], "" + } + p++ + return s[1:p], s[p:] +} + +// Extension is a single BCP 47 extension. +type Extension struct { + s string +} + +// String returns the string representation of the extension, including the +// type tag. +func (e Extension) String() string { + return e.s +} + +// ParseExtension parses s as an extension and returns it on success. +func ParseExtension(s string) (e Extension, err error) { + scan := makeScannerString(s) + var end int + if n := len(scan.token); n != 1 { + return Extension{}, errSyntax + } + scan.toLower(0, len(scan.b)) + end = parseExtension(&scan) + if end != len(s) { + return Extension{}, errSyntax + } + return Extension{string(scan.b)}, nil +} + +// Type returns the one-byte extension type of e. It returns 0 for the zero +// exception. +func (e Extension) Type() byte { + if e.s == "" { + return 0 + } + return e.s[0] +} + +// Tokens returns the list of tokens of e. +func (e Extension) Tokens() []string { + return strings.Split(e.s, "-") +} + +// Extension returns the extension of type x for tag t. It will return +// false for ok if t does not have the requested extension. The returned +// extension will be invalid in this case. +func (t Tag) Extension(x byte) (ext Extension, ok bool) { + for i := int(t.pExt); i < len(t.str)-1; { + var ext string + i, ext = getExtension(t.str, i) + if ext[0] == x { + return Extension{ext}, true + } + } + return Extension{}, false +} + +// Extensions returns all extensions of t. +func (t Tag) Extensions() []Extension { + e := []Extension{} + for i := int(t.pExt); i < len(t.str)-1; { + var ext string + i, ext = getExtension(t.str, i) + e = append(e, Extension{ext}) + } + return e +} + +// TypeForKey returns the type associated with the given key, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// TypeForKey will traverse the inheritance chain to get the correct value. 
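The key-type handling described above can be exercised through the public API; the sketch below is not part of the vendored file, and the tag "de-DE-u-co-phonebk" and the key/type values are illustrative only.

    package main

    import (
        "fmt"

        "golang.org/x/text/language"
    )

    func main() {
        t := language.Make("de-DE-u-co-phonebk")

        // Read the collation setting from the 'u' extension.
        fmt.Println(t.TypeForKey("co")) // expected "phonebk"

        // Add or replace a pair; per SetTypeForKey below, keys are 2 letters
        // and types are 3 to 8 characters long.
        t, err := t.SetTypeForKey("nu", "latn")
        fmt.Println(t, err)

        // An empty value removes an existing pair with the same key.
        t, _ = t.SetTypeForKey("co", "")
        fmt.Println(t)
    }
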
+func (t Tag) TypeForKey(key string) string { + if start, end, _ := t.findTypeForKey(key); end != start { + return t.str[start:end] + } + return "" +} + +var ( + errPrivateUse = errors.New("cannot set a key on a private use tag") + errInvalidArguments = errors.New("invalid key or type") +) + +// SetTypeForKey returns a new Tag with the key set to type, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// An empty value removes an existing pair with the same key. +func (t Tag) SetTypeForKey(key, value string) (Tag, error) { + if t.private() { + return t, errPrivateUse + } + if len(key) != 2 { + return t, errInvalidArguments + } + + // Remove the setting if value is "". + if value == "" { + start, end, _ := t.findTypeForKey(key) + if start != end { + // Remove key tag and leading '-'. + start -= 4 + + // Remove a possible empty extension. + if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' { + start -= 2 + } + if start == int(t.pVariant) && end == len(t.str) { + t.str = "" + t.pVariant, t.pExt = 0, 0 + } else { + t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:]) + } + } + return t, nil + } + + if len(value) < 3 || len(value) > 8 { + return t, errInvalidArguments + } + + var ( + buf [maxCoreSize + maxSimpleUExtensionSize]byte + uStart int // start of the -u extension. + ) + + // Generate the tag string if needed. + if t.str == "" { + uStart = t.genCoreBytes(buf[:]) + buf[uStart] = '-' + uStart++ + } + + // Create new key-type pair and parse it to verify. + b := buf[uStart:] + copy(b, "u-") + copy(b[2:], key) + b[4] = '-' + b = b[:5+copy(b[5:], value)] + scan := makeScanner(b) + if parseExtensions(&scan); scan.err != nil { + return t, scan.err + } + + // Assemble the replacement string. + if t.str == "" { + t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1) + t.str = string(buf[:uStart+len(b)]) + } else { + s := t.str + start, end, hasExt := t.findTypeForKey(key) + if start == end { + if hasExt { + b = b[2:] + } + t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:]) + } else { + t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:]) + } + } + return t, nil +} + +// findKeyAndType returns the start and end position for the type corresponding +// to key or the point at which to insert the key-value pair if the type +// wasn't found. The hasExt return value reports whether an -u extension was present. +// Note: the extensions are typically very small and are likely to contain +// only one key-type pair. +func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) { + p := int(t.pExt) + if len(key) != 2 || p == len(t.str) || p == 0 { + return p, p, false + } + s := t.str + + // Find the correct extension. + for p++; s[p] != 'u'; p++ { + if s[p] > 'u' { + p-- + return p, p, false + } + if p = nextExtension(s, p); p == len(s) { + return len(s), len(s), false + } + } + // Proceed to the hyphen following the extension name. + p++ + + // curKey is the key currently being processed. + curKey := "" + + // Iterate over keys until we get the end of a section. + for { + // p points to the hyphen preceding the current token. + if p3 := p + 3; s[p3] == '-' { + // Found a key. + // Check whether we just processed the key that was requested. + if curKey == key { + return start, p, true + } + // Set to the next key and continue scanning type tokens. 
+ curKey = s[p+1 : p3] + if curKey > key { + return p, p, true + } + // Start of the type token sequence. + start = p + 4 + // A type is at least 3 characters long. + p += 7 // 4 + 3 + } else { + // Attribute or type, which is at least 3 characters long. + p += 4 + } + // p points past the third character of a type or attribute. + max := p + 5 // maximum length of token plus hyphen. + if len(s) < max { + max = len(s) + } + for ; p < max && s[p] != '-'; p++ { + } + // Bail if we have exhausted all tokens or if the next token starts + // a new extension. + if p == len(s) || s[p+2] == '-' { + if curKey == key { + return start, p, true + } + return p, p, true + } + } +} + +// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags +// for which data exists in the text repository. The index will change over time +// and should not be stored in persistent storage. Extensions, except for the +// 'va' type of the 'u' extension, are ignored. It will return 0, false if no +// compact tag exists, where 0 is the index for the root language (Und). +func CompactIndex(t Tag) (index int, ok bool) { + // TODO: perhaps give more frequent tags a lower index. + // TODO: we could make the indexes stable. This will excluded some + // possibilities for optimization, so don't do this quite yet. + b, s, r := t.Raw() + if len(t.str) > 0 { + if strings.HasPrefix(t.str, "x-") { + // We have no entries for user-defined tags. + return 0, false + } + if uint16(t.pVariant) != t.pExt { + // There are no tags with variants and an u-va type. + if t.TypeForKey("va") != "" { + return 0, false + } + t, _ = Raw.Compose(b, s, r, t.Variants()) + } else if _, ok := t.Extension('u'); ok { + // Strip all but the 'va' entry. + variant := t.TypeForKey("va") + t, _ = Raw.Compose(b, s, r) + t, _ = t.SetTypeForKey("va", variant) + } + if len(t.str) > 0 { + // We have some variants. + for i, s := range specialTags { + if s == t { + return i + 1, true + } + } + return 0, false + } + } + // No variants specified: just compare core components. + // The key has the form lllssrrr, where l, s, and r are nibbles for + // respectively the langID, scriptID, and regionID. + key := uint32(b.langID) << (8 + 12) + key |= uint32(s.scriptID) << 12 + key |= uint32(r.regionID) + x, ok := coreTags[key] + return int(x), ok +} + +// Base is an ISO 639 language code, used for encoding the base language +// of a language tag. +type Base struct { + langID +} + +// ParseBase parses a 2- or 3-letter ISO 639 code. +// It returns a ValueError if s is a well-formed but unknown language identifier +// or another error if another error occurred. +func ParseBase(s string) (Base, error) { + if n := len(s); n < 2 || 3 < n { + return Base{}, errSyntax + } + var buf [3]byte + l, err := getLangID(buf[:copy(buf[:], s)]) + return Base{l}, err +} + +// Script is a 4-letter ISO 15924 code for representing scripts. +// It is idiomatically represented in title case. +type Script struct { + scriptID +} + +// ParseScript parses a 4-letter ISO 15924 code. +// It returns a ValueError if s is a well-formed but unknown script identifier +// or another error if another error occurred. +func ParseScript(s string) (Script, error) { + if len(s) != 4 { + return Script{}, errSyntax + } + var buf [4]byte + sc, err := getScriptID(script, buf[:copy(buf[:], s)]) + return Script{sc}, err +} + +// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions. +type Region struct { + regionID +} + +// EncodeM49 returns the Region for the given UN M.49 code. 
+// It returns an error if r is not a valid code. +func EncodeM49(r int) (Region, error) { + rid, err := getRegionM49(r) + return Region{rid}, err +} + +// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code. +// It returns a ValueError if s is a well-formed but unknown region identifier +// or another error if another error occurred. +func ParseRegion(s string) (Region, error) { + if n := len(s); n < 2 || 3 < n { + return Region{}, errSyntax + } + var buf [3]byte + r, err := getRegionID(buf[:copy(buf[:], s)]) + return Region{r}, err +} + +// IsCountry returns whether this region is a country or autonomous area. This +// includes non-standard definitions from CLDR. +func (r Region) IsCountry() bool { + if r.regionID == 0 || r.IsGroup() || r.IsPrivateUse() && r.regionID != _XK { + return false + } + return true +} + +// IsGroup returns whether this region defines a collection of regions. This +// includes non-standard definitions from CLDR. +func (r Region) IsGroup() bool { + if r.regionID == 0 { + return false + } + return int(regionInclusion[r.regionID]) < len(regionContainment) +} + +// Contains returns whether Region c is contained by Region r. It returns true +// if c == r. +func (r Region) Contains(c Region) bool { + return r.regionID.contains(c.regionID) +} + +func (r regionID) contains(c regionID) bool { + if r == c { + return true + } + g := regionInclusion[r] + if g >= nRegionGroups { + return false + } + m := regionContainment[g] + + d := regionInclusion[c] + b := regionInclusionBits[d] + + // A contained country may belong to multiple disjoint groups. Matching any + // of these indicates containment. If the contained region is a group, it + // must strictly be a subset. + if d >= nRegionGroups { + return b&m != 0 + } + return b&^m == 0 +} + +var errNoTLD = errors.New("language: region is not a valid ccTLD") + +// TLD returns the country code top-level domain (ccTLD). UK is returned for GB. +// In all other cases it returns either the region itself or an error. +// +// This method may return an error for a region for which there exists a +// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The +// region will already be canonicalized it was obtained from a Tag that was +// obtained using any of the default methods. +func (r Region) TLD() (Region, error) { + // See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the + // difference between ISO 3166-1 and IANA ccTLD. + if r.regionID == _GB { + r = Region{_UK} + } + if (r.typ() & ccTLD) == 0 { + return Region{}, errNoTLD + } + return r, nil +} + +// Canonicalize returns the region or a possible replacement if the region is +// deprecated. It will not return a replacement for deprecated regions that +// are split into multiple regions. +func (r Region) Canonicalize() Region { + if cr := normRegion(r.regionID); cr != 0 { + return Region{cr} + } + return r +} + +// Variant represents a registered variant of a language as defined by BCP 47. +type Variant struct { + variant string +} + +// ParseVariant parses and returns a Variant. An error is returned if s is not +// a valid variant. +func ParseVariant(s string) (Variant, error) { + s = strings.ToLower(s) + if _, ok := variantIndex[s]; ok { + return Variant{s}, nil + } + return Variant{}, mkErrInvalid([]byte(s)) +} + +// String returns the string representation of the variant. 
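The Region helpers in this file (ParseRegion, EncodeM49, Contains, TLD) compose as in the following sketch, which is not part of the vendored file and assumes only the vendored public API; the concrete region codes are illustrative.

    package main

    import (
        "fmt"

        "golang.org/x/text/language"
    )

    func main() {
        de, _ := language.ParseRegion("DE") // ISO 3166-1 alpha-2
        world, _ := language.EncodeM49(1)   // UN M.49 code 001 ("World")

        // A group region contains its members; Contains also returns true for c == r.
        fmt.Println(world.Contains(de), de.Contains(de))

        // Per the TLD doc comment above, the ccTLD for GB is UK.
        gb, _ := language.ParseRegion("GB")
        fmt.Println(gb.TLD())
    }
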
+func (v Variant) String() string { + return v.variant +} diff --git a/vendor/golang.org/x/text/language/lookup.go b/vendor/golang.org/x/text/language/lookup.go new file mode 100644 index 000000000..1d80ac370 --- /dev/null +++ b/vendor/golang.org/x/text/language/lookup.go @@ -0,0 +1,396 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "bytes" + "fmt" + "sort" + "strconv" + + "golang.org/x/text/internal/tag" +) + +// findIndex tries to find the given tag in idx and returns a standardized error +// if it could not be found. +func findIndex(idx tag.Index, key []byte, form string) (index int, err error) { + if !tag.FixCase(form, key) { + return 0, errSyntax + } + i := idx.Index(key) + if i == -1 { + return 0, mkErrInvalid(key) + } + return i, nil +} + +func searchUint(imap []uint16, key uint16) int { + return sort.Search(len(imap), func(i int) bool { + return imap[i] >= key + }) +} + +type langID uint16 + +// getLangID returns the langID of s if s is a canonical subtag +// or langUnknown if s is not a canonical subtag. +func getLangID(s []byte) (langID, error) { + if len(s) == 2 { + return getLangISO2(s) + } + return getLangISO3(s) +} + +// mapLang returns the mapped langID of id according to mapping m. +func normLang(id langID) (langID, langAliasType) { + k := sort.Search(len(langAliasMap), func(i int) bool { + return langAliasMap[i].from >= uint16(id) + }) + if k < len(langAliasMap) && langAliasMap[k].from == uint16(id) { + return langID(langAliasMap[k].to), langAliasTypes[k] + } + return id, langAliasTypeUnknown +} + +// getLangISO2 returns the langID for the given 2-letter ISO language code +// or unknownLang if this does not exist. +func getLangISO2(s []byte) (langID, error) { + if !tag.FixCase("zz", s) { + return 0, errSyntax + } + if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 { + return langID(i), nil + } + return 0, mkErrInvalid(s) +} + +const base = 'z' - 'a' + 1 + +func strToInt(s []byte) uint { + v := uint(0) + for i := 0; i < len(s); i++ { + v *= base + v += uint(s[i] - 'a') + } + return v +} + +// converts the given integer to the original ASCII string passed to strToInt. +// len(s) must match the number of characters obtained. +func intToStr(v uint, s []byte) { + for i := len(s) - 1; i >= 0; i-- { + s[i] = byte(v%base) + 'a' + v /= base + } +} + +// getLangISO3 returns the langID for the given 3-letter ISO language code +// or unknownLang if this does not exist. +func getLangISO3(s []byte) (langID, error) { + if tag.FixCase("und", s) { + // first try to match canonical 3-letter entries + for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) { + if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] { + // We treat "und" as special and always translate it to "unspecified". + // Note that ZZ and Zzzz are private use and are not treated as + // unspecified by default. + id := langID(i) + if id == nonCanonicalUnd { + return 0, nil + } + return id, nil + } + } + if i := altLangISO3.Index(s); i != -1 { + return langID(altLangIndex[altLangISO3.Elem(i)[3]]), nil + } + n := strToInt(s) + if langNoIndex[n/8]&(1<<(n%8)) != 0 { + return langID(n) + langNoIndexOffset, nil + } + // Check for non-canonical uses of ISO3. 
+ for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) { + if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] { + return langID(i), nil + } + } + return 0, mkErrInvalid(s) + } + return 0, errSyntax +} + +// stringToBuf writes the string to b and returns the number of bytes +// written. cap(b) must be >= 3. +func (id langID) stringToBuf(b []byte) int { + if id >= langNoIndexOffset { + intToStr(uint(id)-langNoIndexOffset, b[:3]) + return 3 + } else if id == 0 { + return copy(b, "und") + } + l := lang[id<<2:] + if l[3] == 0 { + return copy(b, l[:3]) + } + return copy(b, l[:2]) +} + +// String returns the BCP 47 representation of the langID. +// Use b as variable name, instead of id, to ensure the variable +// used is consistent with that of Base in which this type is embedded. +func (b langID) String() string { + if b == 0 { + return "und" + } else if b >= langNoIndexOffset { + b -= langNoIndexOffset + buf := [3]byte{} + intToStr(uint(b), buf[:]) + return string(buf[:]) + } + l := lang.Elem(int(b)) + if l[3] == 0 { + return l[:3] + } + return l[:2] +} + +// ISO3 returns the ISO 639-3 language code. +func (b langID) ISO3() string { + if b == 0 || b >= langNoIndexOffset { + return b.String() + } + l := lang.Elem(int(b)) + if l[3] == 0 { + return l[:3] + } else if l[2] == 0 { + return altLangISO3.Elem(int(l[3]))[:3] + } + // This allocation will only happen for 3-letter ISO codes + // that are non-canonical BCP 47 language identifiers. + return l[0:1] + l[2:4] +} + +// IsPrivateUse reports whether this language code is reserved for private use. +func (b langID) IsPrivateUse() bool { + return langPrivateStart <= b && b <= langPrivateEnd +} + +type regionID uint16 + +// getRegionID returns the region id for s if s is a valid 2-letter region code +// or unknownRegion. +func getRegionID(s []byte) (regionID, error) { + if len(s) == 3 { + if isAlpha(s[0]) { + return getRegionISO3(s) + } + if i, err := strconv.ParseUint(string(s), 10, 10); err == nil { + return getRegionM49(int(i)) + } + } + return getRegionISO2(s) +} + +// getRegionISO2 returns the regionID for the given 2-letter ISO country code +// or unknownRegion if this does not exist. +func getRegionISO2(s []byte) (regionID, error) { + i, err := findIndex(regionISO, s, "ZZ") + if err != nil { + return 0, err + } + return regionID(i) + isoRegionOffset, nil +} + +// getRegionISO3 returns the regionID for the given 3-letter ISO country code +// or unknownRegion if this does not exist. +func getRegionISO3(s []byte) (regionID, error) { + if tag.FixCase("ZZZ", s) { + for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) { + if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] { + return regionID(i) + isoRegionOffset, nil + } + } + for i := 0; i < len(altRegionISO3); i += 3 { + if tag.Compare(altRegionISO3[i:i+3], s) == 0 { + return regionID(altRegionIDs[i/3]), nil + } + } + return 0, mkErrInvalid(s) + } + return 0, errSyntax +} + +func getRegionM49(n int) (regionID, error) { + if 0 < n && n <= 999 { + const ( + searchBits = 7 + regionBits = 9 + regionMask = 1<<regionBits - 1 + ) + idx := n >> searchBits + buf := fromM49[m49Index[idx]:m49Index[idx+1]] + val := uint16(n) << regionBits // we rely on bits shifting out + i := sort.Search(len(buf), func(i int) bool { + return buf[i] >= val + }) + if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val { + return regionID(r & regionMask), nil + } + } + var e ValueError + fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n) + return 0, e +} + +// normRegion returns a region if r is deprecated or 0 otherwise. 

+// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ). +// TODO: consider mapping split up regions to new most populous one (like CLDR). +func normRegion(r regionID) regionID { + m := regionOldMap + k := sort.Search(len(m), func(i int) bool { + return m[i].from >= uint16(r) + }) + if k < len(m) && m[k].from == uint16(r) { + return regionID(m[k].to) + } + return 0 +} + +const ( + iso3166UserAssigned = 1 << iota + ccTLD + bcp47Region +) + +func (r regionID) typ() byte { + return regionTypes[r] +} + +// String returns the BCP 47 representation for the region. +// It returns "ZZ" for an unspecified region. +func (r regionID) String() string { + if r < isoRegionOffset { + if r == 0 { + return "ZZ" + } + return fmt.Sprintf("%03d", r.M49()) + } + r -= isoRegionOffset + return regionISO.Elem(int(r))[:2] +} + +// ISO3 returns the 3-letter ISO code of r. +// Note that not all regions have a 3-letter ISO code. +// In such cases this method returns "ZZZ". +func (r regionID) ISO3() string { + if r < isoRegionOffset { + return "ZZZ" + } + r -= isoRegionOffset + reg := regionISO.Elem(int(r)) + switch reg[2] { + case 0: + return altRegionISO3[reg[3]:][:3] + case ' ': + return "ZZZ" + } + return reg[0:1] + reg[2:4] +} + +// M49 returns the UN M.49 encoding of r, or 0 if this encoding +// is not defined for r. +func (r regionID) M49() int { + return int(m49[r]) +} + +// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This +// may include private-use tags that are assigned by CLDR and used in this +// implementation. So IsPrivateUse and IsCountry can be simultaneously true. +func (r regionID) IsPrivateUse() bool { + return r.typ()&iso3166UserAssigned != 0 +} + +type scriptID uint8 + +// getScriptID returns the script id for string s. It assumes that s +// is of the format [A-Z][a-z]{3}. +func getScriptID(idx tag.Index, s []byte) (scriptID, error) { + i, err := findIndex(idx, s, "Zzzz") + return scriptID(i), err +} + +// String returns the script code in title case. +// It returns "Zzzz" for an unspecified script. +func (s scriptID) String() string { + if s == 0 { + return "Zzzz" + } + return script.Elem(int(s)) +} + +// IsPrivateUse reports whether this script code is reserved for private use. +func (s scriptID) IsPrivateUse() bool { + return _Qaaa <= s && s <= _Qabx +} + +const ( + maxAltTaglen = len("en-US-POSIX") + maxLen = maxAltTaglen +) + +var ( + // grandfatheredMap holds a mapping from legacy and grandfathered tags to + // their base language or index to more elaborate tag. 
+ grandfatheredMap = map[[maxLen]byte]int16{ + [maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban + [maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami + [maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn + [maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak + [maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon + [maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux + [maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo + [maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn + [maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao + [maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay + [maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu + [maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok + [maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn + [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR + [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL + [maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE + [maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu + [maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka + [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan + [maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang + + // Grandfathered tags with no modern replacement will be converted as + // follows: + [maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish + [maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed + [maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default + [maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian + [maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo + [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min + + // CLDR-specific tag. + [maxLen]byte{'r', 'o', 'o', 't'}: 0, // root + [maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX" + } + + altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102} + + altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix" +) + +func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) { + if v, ok := grandfatheredMap[s]; ok { + if v < 0 { + return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true + } + t.lang = langID(v) + return t, true + } + return t, false +} diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go new file mode 100644 index 000000000..8ad950533 --- /dev/null +++ b/vendor/golang.org/x/text/language/match.go @@ -0,0 +1,841 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import "errors" + +// Matcher is the interface that wraps the Match method. +// +// Match returns the best match for any of the given tags, along with +// a unique index associated with the returned tag and a confidence +// score. +type Matcher interface { + Match(t ...Tag) (tag Tag, index int, c Confidence) +} + +// Comprehends reports the confidence score for a speaker of a given language +// to being able to comprehend the written form of an alternative language. 
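NewMatcher and Comprehends, documented here, are the negotiation entry points of this package. The sketch below is not part of the vendored file; it uses only canned tags that the package documentation above already mentions (English, Danish, Chinese, Spanish, Norwegian), and per that documentation the preferred pair Spanish/Norwegian should resolve to Danish.

    package main

    import (
        "fmt"

        "golang.org/x/text/language"
    )

    func main() {
        m := language.NewMatcher([]language.Tag{
            language.English, // the first element is the fallback
            language.Danish,
            language.Chinese,
        })

        // Match picks the best supported tag for the user's preference list.
        tag, index, conf := m.Match(language.Spanish, language.Norwegian)
        fmt.Println(tag, index, conf)

        // Comprehends scores how well a Norwegian reader understands written Danish.
        fmt.Println(language.Comprehends(language.Norwegian, language.Danish))
    }
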
+func Comprehends(speaker, alternative Tag) Confidence { + _, _, c := NewMatcher([]Tag{alternative}).Match(speaker) + return c +} + +// NewMatcher returns a Matcher that matches an ordered list of preferred tags +// against a list of supported tags based on written intelligibility, closeness +// of dialect, equivalence of subtags and various other rules. It is initialized +// with the list of supported tags. The first element is used as the default +// value in case no match is found. +// +// Its Match method matches the first of the given Tags to reach a certain +// confidence threshold. The tags passed to Match should therefore be specified +// in order of preference. Extensions are ignored for matching. +// +// The index returned by the Match method corresponds to the index of the +// matched tag in t, but is augmented with the Unicode extension ('u')of the +// corresponding preferred tag. This allows user locale options to be passed +// transparently. +func NewMatcher(t []Tag) Matcher { + return newMatcher(t) +} + +func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) { + match, w, c := m.getBest(want...) + if match == nil { + t = m.default_.tag + } else { + t, index = match.tag, match.index + } + // Copy options from the user-provided tag into the result tag. This is hard + // to do after the fact, so we do it here. + // TODO: consider also adding in variants that are compatible with the + // matched language. + // TODO: Add back region if it is non-ambiguous? Or create another tag to + // preserve the region? + if u, ok := w.Extension('u'); ok { + t, _ = Raw.Compose(t, u) + } + return t, index, c +} + +type scriptRegionFlags uint8 + +const ( + isList = 1 << iota + scriptInFrom + regionInFrom +) + +func (t *Tag) setUndefinedLang(id langID) { + if t.lang == 0 { + t.lang = id + } +} + +func (t *Tag) setUndefinedScript(id scriptID) { + if t.script == 0 { + t.script = id + } +} + +func (t *Tag) setUndefinedRegion(id regionID) { + if t.region == 0 || t.region.contains(id) { + t.region = id + } +} + +// ErrMissingLikelyTagsData indicates no information was available +// to compute likely values of missing tags. +var ErrMissingLikelyTagsData = errors.New("missing likely tags data") + +// addLikelySubtags sets subtags to their most likely value, given the locale. +// In most cases this means setting fields for unknown values, but in some +// cases it may alter a value. It returns a ErrMissingLikelyTagsData error +// if the given locale cannot be expanded. +func (t Tag) addLikelySubtags() (Tag, error) { + id, err := addTags(t) + if err != nil { + return t, err + } else if id.equalTags(t) { + return t, nil + } + id.remakeString() + return id, nil +} + +// specializeRegion attempts to specialize a group region. +func specializeRegion(t *Tag) bool { + if i := regionInclusion[t.region]; i < nRegionGroups { + x := likelyRegionGroup[i] + if langID(x.lang) == t.lang && scriptID(x.script) == t.script { + t.region = regionID(x.region) + } + return true + } + return false +} + +func addTags(t Tag) (Tag, error) { + // We leave private use identifiers alone. + if t.private() { + return t, nil + } + if t.script != 0 && t.region != 0 { + if t.lang != 0 { + // already fully specified + specializeRegion(&t) + return t, nil + } + // Search matches for und-script-region. Note that for these cases + // region will never be a group so there is no need to check for this. 
+ list := likelyRegion[t.region : t.region+1] + if x := list[0]; x.flags&isList != 0 { + list = likelyRegionList[x.lang : x.lang+uint16(x.script)] + } + for _, x := range list { + // Deviating from the spec. See match_test.go for details. + if scriptID(x.script) == t.script { + t.setUndefinedLang(langID(x.lang)) + return t, nil + } + } + } + if t.lang != 0 { + // Search matches for lang-script and lang-region, where lang != und. + if t.lang < langNoIndexOffset { + x := likelyLang[t.lang] + if x.flags&isList != 0 { + list := likelyLangList[x.region : x.region+uint16(x.script)] + if t.script != 0 { + for _, x := range list { + if scriptID(x.script) == t.script && x.flags&scriptInFrom != 0 { + t.setUndefinedRegion(regionID(x.region)) + return t, nil + } + } + } else if t.region != 0 { + count := 0 + goodScript := true + tt := t + for _, x := range list { + // We visit all entries for which the script was not + // defined, including the ones where the region was not + // defined. This allows for proper disambiguation within + // regions. + if x.flags&scriptInFrom == 0 && t.region.contains(regionID(x.region)) { + tt.region = regionID(x.region) + tt.setUndefinedScript(scriptID(x.script)) + goodScript = goodScript && tt.script == scriptID(x.script) + count++ + } + } + if count == 1 { + return tt, nil + } + // Even if we fail to find a unique Region, we might have + // an unambiguous script. + if goodScript { + t.script = tt.script + } + } + } + } + } else { + // Search matches for und-script. + if t.script != 0 { + x := likelyScript[t.script] + if x.region != 0 { + t.setUndefinedRegion(regionID(x.region)) + t.setUndefinedLang(langID(x.lang)) + return t, nil + } + } + // Search matches for und-region. If und-script-region exists, it would + // have been found earlier. + if t.region != 0 { + if i := regionInclusion[t.region]; i < nRegionGroups { + x := likelyRegionGroup[i] + if x.region != 0 { + t.setUndefinedLang(langID(x.lang)) + t.setUndefinedScript(scriptID(x.script)) + t.region = regionID(x.region) + } + } else { + x := likelyRegion[t.region] + if x.flags&isList != 0 { + x = likelyRegionList[x.lang] + } + if x.script != 0 && x.flags != scriptInFrom { + t.setUndefinedLang(langID(x.lang)) + t.setUndefinedScript(scriptID(x.script)) + return t, nil + } + } + } + } + + // Search matches for lang. + if t.lang < langNoIndexOffset { + x := likelyLang[t.lang] + if x.flags&isList != 0 { + x = likelyLangList[x.region] + } + if x.region != 0 { + t.setUndefinedScript(scriptID(x.script)) + t.setUndefinedRegion(regionID(x.region)) + } + specializeRegion(&t) + if t.lang == 0 { + t.lang = _en // default language + } + return t, nil + } + return t, ErrMissingLikelyTagsData +} + +func (t *Tag) setTagsFrom(id Tag) { + t.lang = id.lang + t.script = id.script + t.region = id.region +} + +// minimize removes the region or script subtags from t such that +// t.addLikelySubtags() == t.minimize().addLikelySubtags(). +func (t Tag) minimize() (Tag, error) { + t, err := minimizeTags(t) + if err != nil { + return t, err + } + t.remakeString() + return t, nil +} + +// minimizeTags mimics the behavior of the ICU 51 C implementation. 
+func minimizeTags(t Tag) (Tag, error) { + if t.equalTags(und) { + return t, nil + } + max, err := addTags(t) + if err != nil { + return t, err + } + for _, id := range [...]Tag{ + {lang: t.lang}, + {lang: t.lang, region: t.region}, + {lang: t.lang, script: t.script}, + } { + if x, err := addTags(id); err == nil && max.equalTags(x) { + t.setTagsFrom(id) + break + } + } + return t, nil +} + +// Tag Matching +// CLDR defines an algorithm for finding the best match between two sets of language +// tags. The basic algorithm defines how to score a possible match and then find +// the match with the best score +// (see http://www.unicode.org/reports/tr35/#LanguageMatching). +// Using scoring has several disadvantages. The scoring obfuscates the importance of +// the various factors considered, making the algorithm harder to understand. Using +// scoring also requires the full score to be computed for each pair of tags. +// +// We will use a different algorithm which aims to have the following properties: +// - clarity on the precedence of the various selection factors, and +// - improved performance by allowing early termination of a comparison. +// +// Matching algorithm (overview) +// Input: +// - supported: a set of supported tags +// - default: the default tag to return in case there is no match +// - desired: list of desired tags, ordered by preference, starting with +// the most-preferred. +// +// Algorithm: +// 1) Set the best match to the lowest confidence level +// 2) For each tag in "desired": +// a) For each tag in "supported": +// 1) compute the match between the two tags. +// 2) if the match is better than the previous best match, replace it +// with the new match. (see next section) +// b) if the current best match is above a certain threshold, return this +// match without proceeding to the next tag in "desired". [See Note 1] +// 3) If the best match so far is below a certain threshold, return "default". +// +// Ranking: +// We use two phases to determine whether one pair of tags are a better match +// than another pair of tags. First, we determine a rough confidence level. If the +// levels are different, the one with the highest confidence wins. +// Second, if the rough confidence levels are identical, we use a set of tie-breaker +// rules. +// +// The confidence level of matching a pair of tags is determined by finding the +// lowest confidence level of any matches of the corresponding subtags (the +// result is deemed as good as its weakest link). +// We define the following levels: +// Exact - An exact match of a subtag, before adding likely subtags. +// MaxExact - An exact match of a subtag, after adding likely subtags. +// [See Note 2]. +// High - High level of mutual intelligibility between different subtag +// variants. +// Low - Low level of mutual intelligibility between different subtag +// variants. +// No - No mutual intelligibility. +// +// The following levels can occur for each type of subtag: +// Base: Exact, MaxExact, High, Low, No +// Script: Exact, MaxExact [see Note 3], Low, No +// Region: Exact, MaxExact, High +// Variant: Exact, High +// Private: Exact, No +// +// Any result with a confidence level of Low or higher is deemed a possible match. +// Once a desired tag matches any of the supported tags with a level of MaxExact +// or higher, the next desired tag is not considered (see Step 2.b). +// Note that CLDR provides languageMatching data that defines close equivalence +// classes for base languages, scripts and regions. 
+// +// Tie-breaking +// If we get the same confidence level for two matches, we apply a sequence of +// tie-breaking rules. The first that succeeds defines the result. The rules are +// applied in the following order. +// 1) Original language was defined and was identical. +// 2) Original region was defined and was identical. +// 3) Distance between two maximized regions was the smallest. +// 4) Original script was defined and was identical. +// 5) Distance from want tag to have tag using the parent relation [see Note 5.] +// If there is still no winner after these rules are applied, the first match +// found wins. +// +// Notes: +// [1] Note that even if we may not have a perfect match, if a match is above a +// certain threshold, it is considered a better match than any other match +// to a tag later in the list of preferred language tags. +// [2] In practice, as matching of Exact is done in a separate phase from +// matching the other levels, we reuse the Exact level to mean MaxExact in +// the second phase. As a consequence, we only need the levels defined by +// the Confidence type. The MaxExact confidence level is mapped to High in +// the public API. +// [3] We do not differentiate between maximized script values that were derived +// from suppressScript versus most likely tag data. We determined that in +// ranking the two, one ranks just after the other. Moreover, the two cannot +// occur concurrently. As a consequence, they are identical for practical +// purposes. +// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign +// the MaxExact level to allow iw vs he to still be a closer match than +// en-AU vs en-US, for example. +// [5] In CLDR a locale inherits fields that are unspecified for this locale +// from its parent. Therefore, if a locale is a parent of another locale, +// it is a strong measure for closeness, especially when no other tie +// breaker rule applies. One could also argue it is inconsistent, for +// example, when pt-AO matches pt (which CLDR equates with pt-BR), even +// though its parent is pt-PT according to the inheritance rules. +// +// Implementation Details: +// There are several performance considerations worth pointing out. Most notably, +// we preprocess as much as possible (within reason) at the time of creation of a +// matcher. This includes: +// - creating a per-language map, which includes data for the raw base language +// and its canonicalized variant (if applicable), +// - expanding entries for the equivalence classes defined in CLDR's +// languageMatch data. +// The per-language map ensures that typically only a very small number of tags +// need to be considered. The pre-expansion of canonicalized subtags and +// equivalence classes reduces the amount of map lookups that need to be done at +// runtime. + +// matcher keeps a set of supported language tags, indexed by language. +type matcher struct { + default_ *haveTag + index map[langID]*matchHeader + passSettings bool +} + +// matchHeader has the lists of tags for exact matches and matches based on +// maximized and canonicalized tags for a given language. +type matchHeader struct { + exact []*haveTag + max []*haveTag +} + +// haveTag holds a supported Tag and its maximized script and region. The maximized +// or canonicalized language is not stored as it is not needed during matching. +type haveTag struct { + tag Tag + + // index of this tag in the original list of supported tags. 
+ index int + + // conf is the maximum confidence that can result from matching this haveTag. + // When conf < Exact this means it was inserted after applying a CLDR equivalence rule. + conf Confidence + + // Maximized region and script. + maxRegion regionID + maxScript scriptID + + // altScript may be checked as an alternative match to maxScript. If altScript + // matches, the confidence level for this match is Low. Theoretically there + // could be multiple alternative scripts. This does not occur in practice. + altScript scriptID + + // nextMax is the index of the next haveTag with the same maximized tags. + nextMax uint16 +} + +func makeHaveTag(tag Tag, index int) (haveTag, langID) { + max := tag + if tag.lang != 0 { + max, _ = max.canonicalize(All) + max, _ = addTags(max) + max.remakeString() + } + return haveTag{tag, index, Exact, max.region, max.script, altScript(max.lang, max.script), 0}, max.lang +} + +// altScript returns an alternative script that may match the given script with +// a low confidence. At the moment, the langMatch data allows for at most one +// script to map to another and we rely on this to keep the code simple. +func altScript(l langID, s scriptID) scriptID { + for _, alt := range matchScript { + if (alt.lang == 0 || langID(alt.lang) == l) && scriptID(alt.have) == s { + return scriptID(alt.want) + } + } + return 0 +} + +// addIfNew adds a haveTag to the list of tags only if it is a unique tag. +// Tags that have the same maximized values are linked by index. +func (h *matchHeader) addIfNew(n haveTag, exact bool) { + // Don't add new exact matches. + for _, v := range h.exact { + if v.tag.equalsRest(n.tag) { + return + } + } + if exact { + h.exact = append(h.exact, &n) + } + // Allow duplicate maximized tags, but create a linked list to allow quickly + // comparing the equivalents and bail out. + for i, v := range h.max { + if v.maxScript == n.maxScript && + v.maxRegion == n.maxRegion && + v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() { + for h.max[i].nextMax != 0 { + i = int(h.max[i].nextMax) + } + h.max[i].nextMax = uint16(len(h.max)) + break + } + } + h.max = append(h.max, &n) +} + +// header returns the matchHeader for the given language. It creates one if +// it doesn't already exist. +func (m *matcher) header(l langID) *matchHeader { + if h := m.index[l]; h != nil { + return h + } + h := &matchHeader{} + m.index[l] = h + return h +} + +// newMatcher builds an index for the given supported tags and returns it as +// a matcher. It also expands the index by considering various equivalence classes +// for a given tag. +func newMatcher(supported []Tag) *matcher { + m := &matcher{ + index: make(map[langID]*matchHeader), + } + if len(supported) == 0 { + m.default_ = &haveTag{} + return m + } + // Add supported languages to the index. Add exact matches first to give + // them precedence. + for i, tag := range supported { + pair, _ := makeHaveTag(tag, i) + m.header(tag.lang).addIfNew(pair, true) + } + m.default_ = m.header(supported[0].lang).exact[0] + for i, tag := range supported { + pair, max := makeHaveTag(tag, i) + if max != tag.lang { + m.header(max).addIfNew(pair, false) + } + } + + // update is used to add indexes in the map for equivalent languages. + // If force is true, the update will also apply to derived entries. To + // avoid applying a "transitive closure", use false. 
+ update := func(want, have uint16, conf Confidence, force bool) { + if hh := m.index[langID(have)]; hh != nil { + if !force && len(hh.exact) == 0 { + return + } + hw := m.header(langID(want)) + for _, ht := range hh.max { + v := *ht + if conf < v.conf { + v.conf = conf + } + v.nextMax = 0 // this value needs to be recomputed + if v.altScript != 0 { + v.altScript = altScript(langID(want), v.maxScript) + } + hw.addIfNew(v, conf == Exact && len(hh.exact) > 0) + } + } + } + + // Add entries for languages with mutual intelligibility as defined by CLDR's + // languageMatch data. + for _, ml := range matchLang { + update(ml.want, ml.have, Confidence(ml.conf), false) + if !ml.oneway { + update(ml.have, ml.want, Confidence(ml.conf), false) + } + } + + // Add entries for possible canonicalizations. This is an optimization to + // ensure that only one map lookup needs to be done at runtime per desired tag. + // First we match deprecated equivalents. If they are perfect equivalents + // (their canonicalization simply substitutes a different language code, but + // nothing else), the match confidence is Exact, otherwise it is High. + for i, lm := range langAliasMap { + if lm.from == _sh { + continue + } + + // If deprecated codes match and there is no fiddling with the script or + // or region, we consider it an exact match. + conf := Exact + if langAliasTypes[i] != langMacro { + if !isExactEquivalent(langID(lm.from)) { + conf = High + } + update(lm.to, lm.from, conf, true) + } + update(lm.from, lm.to, conf, true) + } + return m +} + +// getBest gets the best matching tag in m for any of the given tags, taking into +// account the order of preference of the given tags. +func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) { + best := bestMatch{} + for _, w := range want { + var max Tag + // Check for exact match first. + h := m.index[w.lang] + if w.lang != 0 { + // Base language is defined. + if h == nil { + continue + } + for i := range h.exact { + have := h.exact[i] + if have.tag.equalsRest(w) { + return have, w, Exact + } + } + max, _ = w.canonicalize(Legacy | Deprecated) + max, _ = addTags(max) + } else { + // Base language is not defined. + if h != nil { + for i := range h.exact { + have := h.exact[i] + if have.tag.equalsRest(w) { + return have, w, Exact + } + } + } + if w.script == 0 && w.region == 0 { + // We skip all tags matching und for approximate matching, including + // private tags. + continue + } + max, _ = addTags(w) + if h = m.index[max.lang]; h == nil { + continue + } + } + // Check for match based on maximized tag. + for i := range h.max { + have := h.max[i] + best.update(have, w, max.script, max.region) + if best.conf == Exact { + for have.nextMax != 0 { + have = h.max[have.nextMax] + best.update(have, w, max.script, max.region) + } + return best.have, best.want, High + } + } + } + if best.conf <= No { + if len(want) != 0 { + return nil, want[0], No + } + return nil, Tag{}, No + } + return best.have, best.want, best.conf +} + +// bestMatch accumulates the best match so far. +type bestMatch struct { + have *haveTag + want Tag + conf Confidence + // Cached results from applying tie-breaking rules. + origLang bool + origReg bool + regDist uint8 + origScript bool + parentDist uint8 // 255 if have is not an ancestor of want tag. +} + +// update updates the existing best match if the new pair is considered to be a +// better match. +// To determine if the given pair is a better match, it first computes the rough +// confidence level. 
If this surpasses the current match, it will replace it and +// update the tie-breaker rule cache. If there is a tie, it proceeds with applying +// a series of tie-breaker rules. If there is no conclusive winner after applying +// the tie-breaker rules, it leaves the current match as the preferred match. +func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID) { + // Bail if the maximum attainable confidence is below that of the current best match. + c := have.conf + if c < m.conf { + return + } + if have.maxScript != maxScript { + // There is usually very little comprehension between different scripts. + // In a few cases there may still be Low comprehension. This possibility is + // pre-computed and stored in have.altScript. + if Low < m.conf || have.altScript != maxScript { + return + } + c = Low + } else if have.maxRegion != maxRegion { + // There is usually a small difference between languages across regions. + // We use the region distance (below) to disambiguate between equal matches. + if High < c { + c = High + } + } + + // We store the results of the computations of the tie-breaker rules along + // with the best match. There is no need to do the checks once we determine + // we have a winner, but we do still need to do the tie-breaker computations. + // We use "beaten" to keep track if we still need to do the checks. + beaten := false // true if the new pair defeats the current one. + if c != m.conf { + if c < m.conf { + return + } + beaten = true + } + + // Tie-breaker rules: + // We prefer if the pre-maximized language was specified and identical. + origLang := have.tag.lang == tag.lang && tag.lang != 0 + if !beaten && m.origLang != origLang { + if m.origLang { + return + } + beaten = true + } + + // We prefer if the pre-maximized region was specified and identical. + origReg := have.tag.region == tag.region && tag.region != 0 + if !beaten && m.origReg != origReg { + if m.origReg { + return + } + beaten = true + } + + // Next we prefer smaller distances between regions, as defined by regionDist. + regDist := regionDist(have.maxRegion, maxRegion, tag.lang) + if !beaten && m.regDist != regDist { + if regDist > m.regDist { + return + } + beaten = true + } + + // Next we prefer if the pre-maximized script was specified and identical. + origScript := have.tag.script == tag.script && tag.script != 0 + if !beaten && m.origScript != origScript { + if m.origScript { + return + } + beaten = true + } + + // Finally we prefer tags which have a closer parent relationship. + parentDist := parentDistance(have.tag.region, tag) + if !beaten && m.parentDist != parentDist { + if parentDist > m.parentDist { + return + } + beaten = true + } + + // Update m to the newly found best match. + if beaten { + m.have = have + m.want = tag + m.conf = c + m.origLang = origLang + m.origReg = origReg + m.origScript = origScript + m.regDist = regDist + m.parentDist = parentDist + } +} + +// parentDistance returns the number of times Parent must be called before the +// regions match. It is assumed that it has already been checked that lang and +// script are identical. If haveRegion does not occur in the ancestor chain of +// tag, it returns 255. +func parentDistance(haveRegion regionID, tag Tag) uint8 { + p := tag.Parent() + d := uint8(1) + for haveRegion != p.region { + if p.region == 0 { + return 255 + } + p = p.Parent() + d++ + } + return d +} + +// regionDist wraps regionDistance with some exceptions to the algorithmic distance. 
+func regionDist(a, b regionID, lang langID) uint8 { + if lang == _en { + // Two variants of non-US English are close to each other, regardless of distance. + if a != _US && b != _US { + return 2 + } + } + return uint8(regionDistance(a, b)) +} + +// regionDistance computes the distance between two regions based on the +// distance in the graph of region containments as defined in CLDR. It iterates +// over increasingly inclusive sets of groups, represented as bit vectors, until +// the source bit vector has bits in common with the destination vector. +func regionDistance(a, b regionID) int { + if a == b { + return 0 + } + p, q := regionInclusion[a], regionInclusion[b] + if p < nRegionGroups { + p, q = q, p + } + set := regionInclusionBits + if q < nRegionGroups && set[p]&(1< 0 { + return t.str[t.pVariant:t.pExt] + } + return t.str[t.pVariant:] +} + +// equalsRest compares everything except the language. +func (a Tag) equalsRest(b Tag) bool { + // TODO: don't include extensions in this comparison. To do this efficiently, + // though, we should handle private tags separately. + return a.script == b.script && a.region == b.region && a.variantOrPrivateTagStr() == b.variantOrPrivateTagStr() +} + +// isExactEquivalent returns true if canonicalizing the language will not alter +// the script or region of a tag. +func isExactEquivalent(l langID) bool { + for _, o := range notEquivalent { + if o == l { + return false + } + } + return true +} + +var notEquivalent []langID + +func init() { + // Create a list of all languages for which canonicalization may alter the + // script or region. + for _, lm := range langAliasMap { + tag := Tag{lang: langID(lm.from)} + if tag, _ = tag.canonicalize(All); tag.script != 0 || tag.region != 0 { + notEquivalent = append(notEquivalent, langID(lm.from)) + } + } +} diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go new file mode 100644 index 000000000..cfa28f56e --- /dev/null +++ b/vendor/golang.org/x/text/language/parse.go @@ -0,0 +1,859 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "bytes" + "errors" + "fmt" + "sort" + "strconv" + "strings" + + "golang.org/x/text/internal/tag" +) + +// isAlpha returns true if the byte is not a digit. +// b must be an ASCII letter or digit. +func isAlpha(b byte) bool { + return b > '9' +} + +// isAlphaNum returns true if the string contains only ASCII letters or digits. +func isAlphaNum(s []byte) bool { + for _, c := range s { + if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') { + return false + } + } + return true +} + +// errSyntax is returned by any of the parsing functions when the +// input is not well-formed, according to BCP 47. +// TODO: return the position at which the syntax error occurred? +var errSyntax = errors.New("language: tag is not well-formed") + +// ValueError is returned by any of the parsing functions when the +// input is well-formed but the respective subtag is not recognized +// as a valid value. +type ValueError struct { + v [8]byte +} + +func mkErrInvalid(s []byte) error { + var e ValueError + copy(e.v[:], s) + return e +} + +func (e ValueError) tag() []byte { + n := bytes.IndexByte(e.v[:], 0) + if n == -1 { + n = 8 + } + return e.v[:n] +} + +// Error implements the error interface. 
+func (e ValueError) Error() string { + return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag()) +} + +// Subtag returns the subtag for which the error occurred. +func (e ValueError) Subtag() string { + return string(e.tag()) +} + +// scanner is used to scan BCP 47 tokens, which are separated by _ or -. +type scanner struct { + b []byte + bytes [max99thPercentileSize]byte + token []byte + start int // start position of the current token + end int // end position of the current token + next int // next point for scan + err error + done bool +} + +func makeScannerString(s string) scanner { + scan := scanner{} + if len(s) <= len(scan.bytes) { + scan.b = scan.bytes[:copy(scan.bytes[:], s)] + } else { + scan.b = []byte(s) + } + scan.init() + return scan +} + +// makeScanner returns a scanner using b as the input buffer. +// b is not copied and may be modified by the scanner routines. +func makeScanner(b []byte) scanner { + scan := scanner{b: b} + scan.init() + return scan +} + +func (s *scanner) init() { + for i, c := range s.b { + if c == '_' { + s.b[i] = '-' + } + } + s.scan() +} + +// restToLower converts the string between start and end to lower case. +func (s *scanner) toLower(start, end int) { + for i := start; i < end; i++ { + c := s.b[i] + if 'A' <= c && c <= 'Z' { + s.b[i] += 'a' - 'A' + } + } +} + +func (s *scanner) setError(e error) { + if s.err == nil || (e == errSyntax && s.err != errSyntax) { + s.err = e + } +} + +// resizeRange shrinks or grows the array at position oldStart such that +// a new string of size newSize can fit between oldStart and oldEnd. +// Sets the scan point to after the resized range. +func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { + s.start = oldStart + if end := oldStart + newSize; end != oldEnd { + diff := end - oldEnd + if end < cap(s.b) { + b := make([]byte, len(s.b)+diff) + copy(b, s.b[:oldStart]) + copy(b[end:], s.b[oldEnd:]) + s.b = b + } else { + s.b = append(s.b[end:], s.b[oldEnd:]...) + } + s.next = end + (s.next - s.end) + s.end = end + } +} + +// replace replaces the current token with repl. +func (s *scanner) replace(repl string) { + s.resizeRange(s.start, s.end, len(repl)) + copy(s.b[s.start:], repl) +} + +// gobble removes the current token from the input. +// Caller must call scan after calling gobble. +func (s *scanner) gobble(e error) { + s.setError(e) + if s.start == 0 { + s.b = s.b[:+copy(s.b, s.b[s.next:])] + s.end = 0 + } else { + s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])] + s.end = s.start - 1 + } + s.next = s.start +} + +// deleteRange removes the given range from s.b before the current token. +func (s *scanner) deleteRange(start, end int) { + s.setError(errSyntax) + s.b = s.b[:start+copy(s.b[start:], s.b[end:])] + diff := end - start + s.next -= diff + s.start -= diff + s.end -= diff +} + +// scan parses the next token of a BCP 47 string. Tokens that are larger +// than 8 characters or include non-alphanumeric characters result in an error +// and are gobbled and removed from the output. +// It returns the end position of the last token consumed. 
+func (s *scanner) scan() (end int) { + end = s.end + s.token = nil + for s.start = s.next; s.next < len(s.b); { + i := bytes.IndexByte(s.b[s.next:], '-') + if i == -1 { + s.end = len(s.b) + s.next = len(s.b) + i = s.end - s.start + } else { + s.end = s.next + i + s.next = s.end + 1 + } + token := s.b[s.start:s.end] + if i < 1 || i > 8 || !isAlphaNum(token) { + s.gobble(errSyntax) + continue + } + s.token = token + return end + } + if n := len(s.b); n > 0 && s.b[n-1] == '-' { + s.setError(errSyntax) + s.b = s.b[:len(s.b)-1] + } + s.done = true + return end +} + +// acceptMinSize parses multiple tokens of the given size or greater. +// It returns the end position of the last token consumed. +func (s *scanner) acceptMinSize(min int) (end int) { + end = s.end + s.scan() + for ; len(s.token) >= min; s.scan() { + end = s.end + } + return end +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// The resulting tag is canonicalized using the default canonicalization type. +func Parse(s string) (t Tag, err error) { + return Default.Parse(s) +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// The resulting tag is canonicalized using the the canonicalization type c. +func (c CanonType) Parse(s string) (t Tag, err error) { + // TODO: consider supporting old-style locale key-value pairs. + if s == "" { + return und, errSyntax + } + if len(s) <= maxAltTaglen { + b := [maxAltTaglen]byte{} + for i, c := range s { + // Generating invalid UTF-8 is okay as it won't match. + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } else if c == '_' { + c = '-' + } + b[i] = byte(c) + } + if t, ok := grandfathered(b); ok { + return t, nil + } + } + scan := makeScannerString(s) + t, err = parse(&scan, s) + t, changed := t.canonicalize(c) + if changed { + t.remakeString() + } + return t, err +} + +func parse(scan *scanner, s string) (t Tag, err error) { + t = und + var end int + if n := len(scan.token); n <= 1 { + scan.toLower(0, len(scan.b)) + if n == 0 || scan.token[0] != 'x' { + return t, errSyntax + } + end = parseExtensions(scan) + } else if n >= 4 { + return und, errSyntax + } else { // the usual case + t, end = parseTag(scan) + if n := len(scan.token); n == 1 { + t.pExt = uint16(end) + end = parseExtensions(scan) + } else if end < len(scan.b) { + scan.setError(errSyntax) + scan.b = scan.b[:end] + } + } + if int(t.pVariant) < len(scan.b) { + if end < len(s) { + s = s[:end] + } + if len(s) > 0 && tag.Compare(s, scan.b) == 0 { + t.str = s + } else { + t.str = string(scan.b) + } + } else { + t.pVariant, t.pExt = 0, 0 + } + return t, scan.err +} + +// parseTag parses language, script, region and variants. 
+// It returns a Tag and the end position in the input that was parsed. +func parseTag(scan *scanner) (t Tag, end int) { + var e error + // TODO: set an error if an unknown lang, script or region is encountered. + t.lang, e = getLangID(scan.token) + scan.setError(e) + scan.replace(t.lang.String()) + langStart := scan.start + end = scan.scan() + for len(scan.token) == 3 && isAlpha(scan.token[0]) { + // From http://tools.ietf.org/html/bcp47, - tags are equivalent + // to a tag of the form . + lang, e := getLangID(scan.token) + if lang != 0 { + t.lang = lang + copy(scan.b[langStart:], lang.String()) + scan.b[langStart+3] = '-' + scan.start = langStart + 4 + } + scan.gobble(e) + end = scan.scan() + } + if len(scan.token) == 4 && isAlpha(scan.token[0]) { + t.script, e = getScriptID(script, scan.token) + if t.script == 0 { + scan.gobble(e) + } + end = scan.scan() + } + if n := len(scan.token); n >= 2 && n <= 3 { + t.region, e = getRegionID(scan.token) + if t.region == 0 { + scan.gobble(e) + } else { + scan.replace(t.region.String()) + } + end = scan.scan() + } + scan.toLower(scan.start, len(scan.b)) + t.pVariant = byte(end) + end = parseVariants(scan, end, t) + t.pExt = uint16(end) + return t, end +} + +var separator = []byte{'-'} + +// parseVariants scans tokens as long as each token is a valid variant string. +// Duplicate variants are removed. +func parseVariants(scan *scanner, end int, t Tag) int { + start := scan.start + varIDBuf := [4]uint8{} + variantBuf := [4][]byte{} + varID := varIDBuf[:0] + variant := variantBuf[:0] + last := -1 + needSort := false + for ; len(scan.token) >= 4; scan.scan() { + // TODO: measure the impact of needing this conversion and redesign + // the data structure if there is an issue. + v, ok := variantIndex[string(scan.token)] + if !ok { + // unknown variant + // TODO: allow user-defined variants? + scan.gobble(mkErrInvalid(scan.token)) + continue + } + varID = append(varID, v) + variant = append(variant, scan.token) + if !needSort { + if last < int(v) { + last = int(v) + } else { + needSort = true + // There is no legal combinations of more than 7 variants + // (and this is by no means a useful sequence). + const maxVariants = 8 + if len(varID) > maxVariants { + break + } + } + } + end = scan.end + } + if needSort { + sort.Sort(variantsSort{varID, variant}) + k, l := 0, -1 + for i, v := range varID { + w := int(v) + if l == w { + // Remove duplicates. + continue + } + varID[k] = varID[i] + variant[k] = variant[i] + k++ + l = w + } + if str := bytes.Join(variant[:k], separator); len(str) == 0 { + end = start - 1 + } else { + scan.resizeRange(start, end, len(str)) + copy(scan.b[scan.start:], str) + end = scan.end + } + } + return end +} + +type variantsSort struct { + i []uint8 + v [][]byte +} + +func (s variantsSort) Len() int { + return len(s.i) +} + +func (s variantsSort) Swap(i, j int) { + s.i[i], s.i[j] = s.i[j], s.i[i] + s.v[i], s.v[j] = s.v[j], s.v[i] +} + +func (s variantsSort) Less(i, j int) bool { + return s.i[i] < s.i[j] +} + +type bytesSort [][]byte + +func (b bytesSort) Len() int { + return len(b) +} + +func (b bytesSort) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b bytesSort) Less(i, j int) bool { + return bytes.Compare(b[i], b[j]) == -1 +} + +// parseExtensions parses and normalizes the extensions in the buffer. +// It returns the last position of scan.b that is part of any extension. +// It also trims scan.b to remove excess parts accordingly. 
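+// For example, singleton extensions are reordered alphabetically, with any
+// private-use ("x") extension placed last.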
+func parseExtensions(scan *scanner) int { + start := scan.start + exts := [][]byte{} + private := []byte{} + end := scan.end + for len(scan.token) == 1 { + extStart := scan.start + ext := scan.token[0] + end = parseExtension(scan) + extension := scan.b[extStart:end] + if len(extension) < 3 || (ext != 'x' && len(extension) < 4) { + scan.setError(errSyntax) + end = extStart + continue + } else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) { + scan.b = scan.b[:end] + return end + } else if ext == 'x' { + private = extension + break + } + exts = append(exts, extension) + } + sort.Sort(bytesSort(exts)) + if len(private) > 0 { + exts = append(exts, private) + } + scan.b = scan.b[:start] + if len(exts) > 0 { + scan.b = append(scan.b, bytes.Join(exts, separator)...) + } else if start > 0 { + // Strip trailing '-'. + scan.b = scan.b[:start-1] + } + return end +} + +// parseExtension parses a single extension and returns the position of +// the extension end. +func parseExtension(scan *scanner) int { + start, end := scan.start, scan.end + switch scan.token[0] { + case 'u': + attrStart := end + scan.scan() + for last := []byte{}; len(scan.token) > 2; scan.scan() { + if bytes.Compare(scan.token, last) != -1 { + // Attributes are unsorted. Start over from scratch. + p := attrStart + 1 + scan.next = p + attrs := [][]byte{} + for scan.scan(); len(scan.token) > 2; scan.scan() { + attrs = append(attrs, scan.token) + end = scan.end + } + sort.Sort(bytesSort(attrs)) + copy(scan.b[p:], bytes.Join(attrs, separator)) + break + } + last = scan.token + end = scan.end + } + var last, key []byte + for attrEnd := end; len(scan.token) == 2; last = key { + key = scan.token + keyEnd := scan.end + end = scan.acceptMinSize(3) + // TODO: check key value validity + if keyEnd == end || bytes.Compare(key, last) != 1 { + // We have an invalid key or the keys are not sorted. + // Start scanning keys from scratch and reorder. + p := attrEnd + 1 + scan.next = p + keys := [][]byte{} + for scan.scan(); len(scan.token) == 2; { + keyStart, keyEnd := scan.start, scan.end + end = scan.acceptMinSize(3) + if keyEnd != end { + keys = append(keys, scan.b[keyStart:end]) + } else { + scan.setError(errSyntax) + end = keyStart + } + } + sort.Sort(bytesSort(keys)) + reordered := bytes.Join(keys, separator) + if e := p + len(reordered); e < end { + scan.deleteRange(e, end) + end = e + } + copy(scan.b[p:], bytes.Join(keys, separator)) + break + } + } + case 't': + scan.scan() + if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) { + _, end = parseTag(scan) + scan.toLower(start, end) + } + for len(scan.token) == 2 && !isAlpha(scan.token[1]) { + end = scan.acceptMinSize(3) + } + case 'x': + end = scan.acceptMinSize(1) + default: + end = scan.acceptMinSize(2) + } + return end +} + +// Compose creates a Tag from individual parts, which may be of type Tag, Base, +// Script, Region, Variant, []Variant, Extension, []Extension or error. If a +// Base, Script or Region or slice of type Variant or Extension is passed more +// than once, the latter will overwrite the former. Variants and Extensions are +// accumulated, but if two extensions of the same type are passed, the latter +// will replace the former. A Tag overwrites all former values and typically +// only makes sense as the first argument. The resulting tag is returned after +// canonicalizing using the Default CanonType. If one or more errors are +// encountered, one of the errors is returned. 
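+//
+// Illustrative sketch only (not part of the upstream documentation); it assumes
+// the exported ParseBase and ParseScript helpers of this package:
+//
+//	// Build a tag for Serbian written in Latin script ("sr-Latn") from
+//	// individually validated parts.
+//	b, _ := ParseBase("sr")
+//	s, _ := ParseScript("Latn")
+//	t, err := Compose(b, s)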
+func Compose(part ...interface{}) (t Tag, err error) { + return Default.Compose(part...) +} + +// Compose creates a Tag from individual parts, which may be of type Tag, Base, +// Script, Region, Variant, []Variant, Extension, []Extension or error. If a +// Base, Script or Region or slice of type Variant or Extension is passed more +// than once, the latter will overwrite the former. Variants and Extensions are +// accumulated, but if two extensions of the same type are passed, the latter +// will replace the former. A Tag overwrites all former values and typically +// only makes sense as the first argument. The resulting tag is returned after +// canonicalizing using CanonType c. If one or more errors are encountered, +// one of the errors is returned. +func (c CanonType) Compose(part ...interface{}) (t Tag, err error) { + var b builder + if err = b.update(part...); err != nil { + return und, err + } + t, _ = b.tag.canonicalize(c) + + if len(b.ext) > 0 || len(b.variant) > 0 { + sort.Sort(sortVariant(b.variant)) + sort.Strings(b.ext) + if b.private != "" { + b.ext = append(b.ext, b.private) + } + n := maxCoreSize + tokenLen(b.variant...) + tokenLen(b.ext...) + buf := make([]byte, n) + p := t.genCoreBytes(buf) + t.pVariant = byte(p) + p += appendTokens(buf[p:], b.variant...) + t.pExt = uint16(p) + p += appendTokens(buf[p:], b.ext...) + t.str = string(buf[:p]) + } else if b.private != "" { + t.str = b.private + t.remakeString() + } + return +} + +type builder struct { + tag Tag + + private string // the x extension + ext []string + variant []string + + err error +} + +func (b *builder) addExt(e string) { + if e == "" { + } else if e[0] == 'x' { + b.private = e + } else { + b.ext = append(b.ext, e) + } +} + +var errInvalidArgument = errors.New("invalid Extension or Variant") + +func (b *builder) update(part ...interface{}) (err error) { + replace := func(l *[]string, s string, eq func(a, b string) bool) bool { + if s == "" { + b.err = errInvalidArgument + return true + } + for i, v := range *l { + if eq(v, s) { + (*l)[i] = s + return true + } + } + return false + } + for _, x := range part { + switch v := x.(type) { + case Tag: + b.tag.lang = v.lang + b.tag.region = v.region + b.tag.script = v.script + if v.str != "" { + b.variant = nil + for x, s := "", v.str[v.pVariant:v.pExt]; s != ""; { + x, s = nextToken(s) + b.variant = append(b.variant, x) + } + b.ext, b.private = nil, "" + for i, e := int(v.pExt), ""; i < len(v.str); { + i, e = getExtension(v.str, i) + b.addExt(e) + } + } + case Base: + b.tag.lang = v.langID + case Script: + b.tag.script = v.scriptID + case Region: + b.tag.region = v.regionID + case Variant: + if !replace(&b.variant, v.variant, func(a, b string) bool { return a == b }) { + b.variant = append(b.variant, v.variant) + } + case Extension: + if !replace(&b.ext, v.s, func(a, b string) bool { return a[0] == b[0] }) { + b.addExt(v.s) + } + case []Variant: + b.variant = nil + for _, x := range v { + b.update(x) + } + case []Extension: + b.ext, b.private = nil, "" + for _, e := range v { + b.update(e) + } + // TODO: support parsing of raw strings based on morphology or just extensions? 
+ case error: + err = v + } + } + return +} + +func tokenLen(token ...string) (n int) { + for _, t := range token { + n += len(t) + 1 + } + return +} + +func appendTokens(b []byte, token ...string) int { + p := 0 + for _, t := range token { + b[p] = '-' + copy(b[p+1:], t) + p += 1 + len(t) + } + return p +} + +type sortVariant []string + +func (s sortVariant) Len() int { + return len(s) +} + +func (s sortVariant) Swap(i, j int) { + s[j], s[i] = s[i], s[j] +} + +func (s sortVariant) Less(i, j int) bool { + return variantIndex[s[i]] < variantIndex[s[j]] +} + +func findExt(list []string, x byte) int { + for i, e := range list { + if e[0] == x { + return i + } + } + return -1 +} + +// getExtension returns the name, body and end position of the extension. +func getExtension(s string, p int) (end int, ext string) { + if s[p] == '-' { + p++ + } + if s[p] == 'x' { + return len(s), s[p:] + } + end = nextExtension(s, p) + return end, s[p:end] +} + +// nextExtension finds the next extension within the string, searching +// for the -- pattern from position p. +// In the fast majority of cases, language tags will have at most +// one extension and extensions tend to be small. +func nextExtension(s string, p int) int { + for n := len(s) - 3; p < n; { + if s[p] == '-' { + if s[p+2] == '-' { + return p + } + p += 3 + } else { + p++ + } + } + return len(s) +} + +var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight") + +// ParseAcceptLanguage parses the contents of a Accept-Language header as +// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and +// a list of corresponding quality weights. It is more permissive than RFC 2616 +// and may return non-nil slices even if the input is not valid. +// The Tags will be sorted by highest weight first and then by first occurrence. +// Tags with a weight of zero will be dropped. An error will be returned if the +// input could not be parsed. +func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) { + var entry string + for s != "" { + if entry, s = split(s, ','); entry == "" { + continue + } + + entry, weight := split(entry, ';') + + // Scan the language. + t, err := Parse(entry) + if err != nil { + id, ok := acceptFallback[entry] + if !ok { + return nil, nil, err + } + t = Tag{lang: id} + } + + // Scan the optional weight. + w := 1.0 + if weight != "" { + weight = consume(weight, 'q') + weight = consume(weight, '=') + // consume returns the empty string when a token could not be + // consumed, resulting in an error for ParseFloat. + if w, err = strconv.ParseFloat(weight, 32); err != nil { + return nil, nil, errInvalidWeight + } + // Drop tags with a quality weight of 0. + if w <= 0 { + continue + } + } + + tag = append(tag, t) + q = append(q, float32(w)) + } + sortStable(&tagSort{tag, q}) + return tag, q, nil +} + +// consume removes a leading token c from s and returns the result or the empty +// string if there is no such token. +func consume(s string, c byte) string { + if s == "" || s[0] != c { + return "" + } + return strings.TrimSpace(s[1:]) +} + +func split(s string, c byte) (head, tail string) { + if i := strings.IndexByte(s, c); i >= 0 { + return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:]) + } + return strings.TrimSpace(s), "" +} + +// Add hack mapping to deal with a small number of cases that that occur +// in Accept-Language (with reasonable frequency). 
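+// For example, with this table an Accept-Language header such as
+// "english;q=0.9, fr;q=0.8" still yields usable tags instead of failing on the
+// non-BCP 47 token "english" (illustrative call, matching the signature above):
+//
+//	tags, weights, err := ParseAcceptLanguage("english;q=0.9, fr;q=0.8")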
+var acceptFallback = map[string]langID{ + "english": _en, + "deutsch": _de, + "italian": _it, + "french": _fr, + "*": _mul, // defined in the spec to match all languages. +} + +type tagSort struct { + tag []Tag + q []float32 +} + +func (s *tagSort) Len() int { + return len(s.q) +} + +func (s *tagSort) Less(i, j int) bool { + return s.q[i] > s.q[j] +} + +func (s *tagSort) Swap(i, j int) { + s.tag[i], s.tag[j] = s.tag[j], s.tag[i] + s.q[i], s.q[j] = s.q[j], s.q[i] +} diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go new file mode 100644 index 000000000..a2aec62f1 --- /dev/null +++ b/vendor/golang.org/x/text/language/tables.go @@ -0,0 +1,3547 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package language + +import "golang.org/x/text/internal/tag" + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "30" + +const numLanguages = 8654 + +const numScripts = 230 + +const numRegions = 356 + +type fromTo struct { + from uint16 + to uint16 +} + +const nonCanonicalUnd = 1191 +const ( + _af = 21 + _am = 38 + _ar = 57 + _az = 87 + _bg = 125 + _bn = 163 + _ca = 213 + _cs = 246 + _da = 253 + _de = 265 + _el = 305 + _en = 308 + _es = 313 + _et = 315 + _fa = 323 + _fi = 332 + _fil = 334 + _fr = 345 + _gu = 413 + _he = 437 + _hi = 439 + _hr = 458 + _hu = 462 + _hy = 464 + _id = 474 + _is = 496 + _it = 497 + _ja = 504 + _ka = 520 + _kk = 570 + _km = 578 + _kn = 585 + _ko = 587 + _ky = 641 + _lo = 687 + _lt = 695 + _lv = 702 + _mk = 758 + _ml = 763 + _mn = 770 + _mo = 775 + _mr = 786 + _ms = 790 + _mul = 797 + _my = 808 + _nb = 830 + _ne = 840 + _nl = 862 + _no = 870 + _pa = 916 + _pl = 938 + _pt = 951 + _ro = 979 + _ru = 985 + _sh = 1021 + _si = 1026 + _sk = 1032 + _sl = 1036 + _sq = 1063 + _sr = 1064 + _sv = 1082 + _sw = 1083 + _ta = 1094 + _te = 1111 + _th = 1121 + _tl = 1136 + _tn = 1142 + _tr = 1152 + _uk = 1188 + _ur = 1194 + _uz = 1202 + _vi = 1209 + _zh = 1311 + _zu = 1316 + _jbo = 507 + _ami = 1639 + _bnn = 2346 + _hak = 431 + _tlh = 14456 + _lb = 652 + _nv = 890 + _pwn = 12044 + _tao = 14177 + _tay = 14187 + _tsu = 14651 + _nn = 865 + _sfb = 13618 + _vgt = 15690 + _sgg = 13649 + _cmn = 2996 + _nan = 826 + _hsn = 460 +) + +const langPrivateStart = 0x2f67 + +const langPrivateEnd = 0x316e + +// lang holds an alphabetically sorted list of ISO-639 language identifiers. +// All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. +// For 2-byte language identifiers, the two successive bytes have the following meaning: +// - if the first letter of the 2- and 3-letter ISO codes are the same: +// the second and third letter of the 3-letter ISO code. +// - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +// For 3-byte language identifiers the 4th byte is 0. 
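+// For example, the 4-byte entry "aaar" stores the 2-letter code "aa" together
+// with the trailing letters of its 3-letter form "aar", whereas "aai\x00" is a
+// language that has only the 3-letter code "aai".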
+const lang tag.Index = "" + // Size: 5280 bytes + "---\x00aaaraai\x00aak\x00aau\x00abbkabi\x00abr\x00abt\x00aby\x00acd\x00a" + + "ce\x00ach\x00ada\x00ade\x00adj\x00ady\x00adz\x00aeveaeb\x00aey\x00affrag" + + "c\x00agd\x00agg\x00agm\x00ago\x00agq\x00aha\x00ahl\x00aho\x00ajg\x00akka" + + "akk\x00ala\x00ali\x00aln\x00alt\x00ammhamm\x00amn\x00amo\x00amp\x00anrga" + + "nc\x00ank\x00ann\x00any\x00aoj\x00aom\x00aoz\x00apc\x00apd\x00ape\x00apr" + + "\x00aps\x00apz\x00arraarc\x00arh\x00arn\x00aro\x00arq\x00ars\x00ary\x00a" + + "rz\x00assmasa\x00ase\x00asg\x00aso\x00ast\x00ata\x00atg\x00atj\x00auy" + + "\x00avvaavl\x00avn\x00avt\x00avu\x00awa\x00awb\x00awo\x00awx\x00ayymayb" + + "\x00azzebaakbal\x00ban\x00bap\x00bar\x00bas\x00bav\x00bax\x00bba\x00bbb" + + "\x00bbc\x00bbd\x00bbj\x00bbp\x00bbr\x00bcf\x00bch\x00bci\x00bcm\x00bcn" + + "\x00bco\x00bcq\x00bcu\x00bdd\x00beelbef\x00beh\x00bej\x00bem\x00bet\x00b" + + "ew\x00bex\x00bez\x00bfd\x00bfq\x00bft\x00bfy\x00bgulbgc\x00bgn\x00bgx" + + "\x00bhihbhb\x00bhg\x00bhi\x00bhk\x00bhl\x00bho\x00bhy\x00biisbib\x00big" + + "\x00bik\x00bim\x00bin\x00bio\x00biq\x00bjh\x00bji\x00bjj\x00bjn\x00bjo" + + "\x00bjr\x00bjz\x00bkc\x00bkm\x00bkq\x00bku\x00bkv\x00blt\x00bmambmh\x00b" + + "mk\x00bmq\x00bmu\x00bnenbng\x00bnm\x00bnp\x00boodboj\x00bom\x00bon\x00bp" + + "y\x00bqc\x00bqi\x00bqp\x00bqv\x00brrebra\x00brh\x00brx\x00brz\x00bsosbsj" + + "\x00bsq\x00bss\x00bst\x00bto\x00btt\x00btv\x00bua\x00buc\x00bud\x00bug" + + "\x00buk\x00bum\x00buo\x00bus\x00buu\x00bvb\x00bwd\x00bwr\x00bxh\x00bye" + + "\x00byn\x00byr\x00bys\x00byv\x00byx\x00bza\x00bze\x00bzf\x00bzh\x00bzw" + + "\x00caatcan\x00cbj\x00cch\x00ccp\x00ceheceb\x00cfa\x00cgg\x00chhachk\x00" + + "chm\x00cho\x00chp\x00chr\x00cja\x00cjm\x00cjv\x00ckb\x00ckl\x00cko\x00ck" + + "y\x00cla\x00cme\x00cooscop\x00cps\x00crrecrj\x00crk\x00crl\x00crm\x00crs" + + "\x00csescsb\x00csw\x00ctd\x00cuhucvhvcyymdaandad\x00daf\x00dag\x00dah" + + "\x00dak\x00dar\x00dav\x00dbd\x00dbq\x00dcc\x00ddn\x00deeuded\x00den\x00d" + + "ga\x00dgh\x00dgi\x00dgl\x00dgr\x00dgz\x00dia\x00dje\x00dnj\x00dob\x00doi" + + "\x00dop\x00dow\x00dri\x00drs\x00dsb\x00dtm\x00dtp\x00dts\x00dty\x00dua" + + "\x00duc\x00dud\x00dug\x00dvivdva\x00dww\x00dyo\x00dyu\x00dzzodzg\x00ebu" + + "\x00eeweefi\x00egl\x00egy\x00eky\x00elllema\x00emi\x00enngenn\x00enq\x00" + + "eopoeri\x00es\x00\x05esu\x00etstetr\x00ett\x00etu\x00etx\x00euusewo\x00e" + + "xt\x00faasfaa\x00fab\x00fag\x00fai\x00fan\x00ffulffi\x00ffm\x00fiinfia" + + "\x00fil\x00fit\x00fjijflr\x00fmp\x00foaofod\x00fon\x00for\x00fpe\x00fqs" + + "\x00frrafrc\x00frp\x00frr\x00frs\x00fub\x00fud\x00fue\x00fuf\x00fuh\x00f" + + "uq\x00fur\x00fuv\x00fuy\x00fvr\x00fyrygalegaa\x00gaf\x00gag\x00gah\x00ga" + + "j\x00gam\x00gan\x00gaw\x00gay\x00gbf\x00gbm\x00gby\x00gbz\x00gcr\x00gdla" + + "gde\x00gdn\x00gdr\x00geb\x00gej\x00gel\x00gez\x00gfk\x00ggn\x00ghs\x00gi" + + "l\x00gim\x00gjk\x00gjn\x00gju\x00gkn\x00gkp\x00gllgglk\x00gmm\x00gmv\x00" + + "gnrngnd\x00gng\x00god\x00gof\x00goi\x00gom\x00gon\x00gor\x00gos\x00got" + + "\x00grc\x00grt\x00grw\x00gsw\x00guujgub\x00guc\x00gud\x00gur\x00guw\x00g" + + "ux\x00guz\x00gvlvgvf\x00gvr\x00gvs\x00gwc\x00gwi\x00gwt\x00gyi\x00haauha" + + "g\x00hak\x00ham\x00haw\x00haz\x00hbb\x00hdy\x00heebhhy\x00hiinhia\x00hif" + + "\x00hig\x00hih\x00hil\x00hla\x00hlu\x00hmd\x00hmt\x00hnd\x00hne\x00hnj" + + "\x00hnn\x00hno\x00homohoc\x00hoj\x00hot\x00hrrvhsb\x00hsn\x00htathuunhui" + + "\x00hyyehzerianaian\x00iar\x00iba\x00ibb\x00iby\x00ica\x00ich\x00idndidd" + + 
"\x00idi\x00idu\x00ieleigboigb\x00ige\x00iiiiijj\x00ikpkikk\x00ikt\x00ikw" + + "\x00ikx\x00ilo\x00imo\x00inndinh\x00iodoiou\x00iri\x00isslittaiukuiw\x00" + + "\x03iwm\x00iws\x00izh\x00izi\x00japnjab\x00jam\x00jbo\x00jbu\x00jen\x00j" + + "gk\x00jgo\x00ji\x00\x06jib\x00jmc\x00jml\x00jra\x00jut\x00jvavjwavkaatka" + + "a\x00kab\x00kac\x00kad\x00kai\x00kaj\x00kam\x00kao\x00kbd\x00kbm\x00kbp" + + "\x00kbq\x00kbx\x00kby\x00kcg\x00kck\x00kcl\x00kct\x00kde\x00kdh\x00kdl" + + "\x00kdt\x00kea\x00ken\x00kez\x00kfo\x00kfr\x00kfy\x00kgonkge\x00kgf\x00k" + + "gp\x00kha\x00khb\x00khn\x00khq\x00khs\x00kht\x00khw\x00khz\x00kiikkij" + + "\x00kiu\x00kiw\x00kjuakjd\x00kjg\x00kjs\x00kjy\x00kkazkkc\x00kkj\x00klal" + + "kln\x00klq\x00klt\x00klx\x00kmhmkmb\x00kmh\x00kmo\x00kms\x00kmu\x00kmw" + + "\x00knanknp\x00koorkoi\x00kok\x00kol\x00kos\x00koz\x00kpe\x00kpf\x00kpo" + + "\x00kpr\x00kpx\x00kqb\x00kqf\x00kqs\x00kqy\x00kraukrc\x00kri\x00krj\x00k" + + "rl\x00krs\x00kru\x00ksasksb\x00ksd\x00ksf\x00ksh\x00ksj\x00ksr\x00ktb" + + "\x00ktm\x00kto\x00kuurkub\x00kud\x00kue\x00kuj\x00kum\x00kun\x00kup\x00k" + + "us\x00kvomkvg\x00kvr\x00kvx\x00kw\x00\x01kwj\x00kwo\x00kxa\x00kxc\x00kxm" + + "\x00kxp\x00kxw\x00kxz\x00kyirkye\x00kyx\x00kzr\x00laatlab\x00lad\x00lag" + + "\x00lah\x00laj\x00las\x00lbtzlbe\x00lbu\x00lbw\x00lcm\x00lcp\x00ldb\x00l" + + "ed\x00lee\x00lem\x00lep\x00leq\x00leu\x00lez\x00lguglgg\x00liimlia\x00li" + + "d\x00lif\x00lig\x00lih\x00lij\x00lis\x00ljp\x00lki\x00lkt\x00lle\x00lln" + + "\x00lmn\x00lmo\x00lmp\x00lninlns\x00lnu\x00loaoloj\x00lok\x00lol\x00lor" + + "\x00los\x00loz\x00lrc\x00ltitltg\x00luublua\x00luo\x00luy\x00luz\x00lvav" + + "lwl\x00lzh\x00lzz\x00mad\x00maf\x00mag\x00mai\x00mak\x00man\x00mas\x00ma" + + "w\x00maz\x00mbh\x00mbo\x00mbq\x00mbu\x00mbw\x00mci\x00mcp\x00mcq\x00mcr" + + "\x00mcu\x00mda\x00mde\x00mdf\x00mdh\x00mdj\x00mdr\x00mdx\x00med\x00mee" + + "\x00mek\x00men\x00mer\x00met\x00meu\x00mfa\x00mfe\x00mfn\x00mfo\x00mfq" + + "\x00mglgmgh\x00mgl\x00mgo\x00mgp\x00mgy\x00mhahmhi\x00mhl\x00mirimif\x00" + + "min\x00mis\x00miw\x00mkkdmki\x00mkl\x00mkp\x00mkw\x00mlalmle\x00mlp\x00m" + + "ls\x00mmo\x00mmu\x00mmx\x00mnonmna\x00mnf\x00mni\x00mnw\x00moolmoa\x00mo" + + "e\x00moh\x00mos\x00mox\x00mpp\x00mps\x00mpt\x00mpx\x00mql\x00mrarmrd\x00" + + "mrj\x00mro\x00mssamtltmtc\x00mtf\x00mti\x00mtr\x00mua\x00mul\x00mur\x00m" + + "us\x00mva\x00mvn\x00mvy\x00mwk\x00mwr\x00mwv\x00mxc\x00mxm\x00myyamyk" + + "\x00mym\x00myv\x00myw\x00myx\x00myz\x00mzk\x00mzm\x00mzn\x00mzp\x00mzw" + + "\x00mzz\x00naaunac\x00naf\x00nah\x00nak\x00nan\x00nap\x00naq\x00nas\x00n" + + "bobnca\x00nce\x00ncf\x00nch\x00nco\x00ncu\x00nddendc\x00nds\x00neepneb" + + "\x00new\x00nex\x00nfr\x00ngdonga\x00ngb\x00ngl\x00nhb\x00nhe\x00nhw\x00n" + + "if\x00nii\x00nij\x00nin\x00niu\x00niy\x00niz\x00njo\x00nkg\x00nko\x00nll" + + "dnmg\x00nmz\x00nnnonnf\x00nnh\x00nnk\x00nnm\x00noornod\x00noe\x00non\x00" + + "nop\x00nou\x00nqo\x00nrblnrb\x00nsk\x00nsn\x00nso\x00nss\x00ntm\x00ntr" + + "\x00nui\x00nup\x00nus\x00nuv\x00nux\x00nvavnwb\x00nxq\x00nxr\x00nyyanym" + + "\x00nyn\x00nzi\x00occiogc\x00ojjiokr\x00okv\x00omrmong\x00onn\x00ons\x00" + + "opm\x00orrioro\x00oru\x00osssosa\x00ota\x00otk\x00ozm\x00paanpag\x00pal" + + "\x00pam\x00pap\x00pau\x00pbi\x00pcd\x00pcm\x00pdc\x00pdt\x00ped\x00peo" + + "\x00pex\x00pfl\x00phl\x00phn\x00pilipil\x00pip\x00pka\x00pko\x00plolpla" + + "\x00pms\x00png\x00pnn\x00pnt\x00pon\x00ppo\x00pra\x00prd\x00prg\x00psusp" + + "ss\x00ptorptp\x00puu\x00pwa\x00quuequc\x00qug\x00rai\x00raj\x00rao\x00rc" + + 
"f\x00rej\x00rel\x00res\x00rgn\x00rhg\x00ria\x00rif\x00rjs\x00rkt\x00rmoh" + + "rmf\x00rmo\x00rmt\x00rmu\x00rnunrna\x00rng\x00roonrob\x00rof\x00roo\x00r" + + "ro\x00rtm\x00ruusrue\x00rug\x00rw\x00\x04rwk\x00rwo\x00ryu\x00saansaf" + + "\x00sah\x00saq\x00sas\x00sat\x00saz\x00sba\x00sbe\x00sbp\x00scrdsck\x00s" + + "cl\x00scn\x00sco\x00scs\x00sdndsdc\x00sdh\x00semesef\x00seh\x00sei\x00se" + + "s\x00sgagsga\x00sgs\x00sgw\x00sgz\x00sh\x00\x02shi\x00shk\x00shn\x00shu" + + "\x00siinsid\x00sig\x00sil\x00sim\x00sjr\x00sklkskc\x00skr\x00sks\x00sllv" + + "sld\x00sli\x00sll\x00sly\x00smmosma\x00smi\x00smj\x00smn\x00smp\x00smq" + + "\x00sms\x00snnasnc\x00snk\x00snp\x00snx\x00sny\x00soomsok\x00soq\x00sou" + + "\x00soy\x00spd\x00spl\x00sps\x00sqqisrrpsrb\x00srn\x00srr\x00srx\x00sssw" + + "ssd\x00ssg\x00ssy\x00stotstk\x00stq\x00suunsua\x00sue\x00suk\x00sur\x00s" + + "us\x00svweswwaswb\x00swc\x00swg\x00swp\x00swv\x00sxn\x00sxw\x00syl\x00sy" + + "r\x00szl\x00taamtaj\x00tal\x00tan\x00taq\x00tbc\x00tbd\x00tbf\x00tbg\x00" + + "tbo\x00tbw\x00tbz\x00tci\x00tcy\x00tdd\x00tdg\x00tdh\x00teelted\x00tem" + + "\x00teo\x00tet\x00tfi\x00tggktgc\x00tgo\x00tgu\x00thhathl\x00thq\x00thr" + + "\x00tiirtif\x00tig\x00tik\x00tim\x00tio\x00tiv\x00tkuktkl\x00tkr\x00tkt" + + "\x00tlgltlf\x00tlx\x00tly\x00tmh\x00tmy\x00tnsntnh\x00toontof\x00tog\x00" + + "toq\x00tpi\x00tpm\x00tpz\x00tqo\x00trurtru\x00trv\x00trw\x00tssotsd\x00t" + + "sf\x00tsg\x00tsj\x00tsw\x00ttatttd\x00tte\x00ttj\x00ttr\x00tts\x00ttt" + + "\x00tuh\x00tul\x00tum\x00tuq\x00tvd\x00tvl\x00tvu\x00twwitwh\x00twq\x00t" + + "xg\x00tyahtya\x00tyv\x00tzm\x00ubu\x00udm\x00ugiguga\x00ukkruli\x00umb" + + "\x00und\x00unr\x00unx\x00urrduri\x00urt\x00urw\x00usa\x00utr\x00uvh\x00u" + + "vl\x00uzzbvag\x00vai\x00van\x00veenvec\x00vep\x00viievic\x00viv\x00vls" + + "\x00vmf\x00vmw\x00voolvot\x00vro\x00vun\x00vut\x00walnwae\x00waj\x00wal" + + "\x00wan\x00war\x00wbp\x00wbq\x00wbr\x00wci\x00wer\x00wgi\x00whg\x00wib" + + "\x00wiu\x00wiv\x00wja\x00wji\x00wls\x00wmo\x00wnc\x00wni\x00wnu\x00woolw" + + "ob\x00wos\x00wrs\x00wsk\x00wtm\x00wuu\x00wuv\x00wwa\x00xav\x00xbi\x00xcr" + + "\x00xes\x00xhhoxla\x00xlc\x00xld\x00xmf\x00xmn\x00xmr\x00xna\x00xnr\x00x" + + "og\x00xon\x00xpr\x00xrb\x00xsa\x00xsi\x00xsm\x00xsr\x00xwe\x00yam\x00yao" + + "\x00yap\x00yas\x00yat\x00yav\x00yay\x00yaz\x00yba\x00ybb\x00yby\x00yer" + + "\x00ygr\x00ygw\x00yiidyko\x00yle\x00ylg\x00yll\x00yml\x00yooryon\x00yrb" + + "\x00yre\x00yrl\x00yss\x00yua\x00yue\x00yuj\x00yut\x00yuw\x00zahazag\x00z" + + "bl\x00zdj\x00zea\x00zgh\x00zhhozia\x00zlm\x00zmi\x00zne\x00zuulzxx\x00zz" + + "a\x00\xff\xff\xff\xff" + +const langNoIndexOffset = 1319 + +// langNoIndex is a bit vector of all 3-letter language codes that are not used as an index +// in lookup tables. The language ids for these language codes are derived directly +// from the letters and are not consecutive. 
+// Size: 2197 bytes, 2197 elements +var langNoIndex = [2197]uint8{ + // Entry 0 - 3F + 0xff, 0xf8, 0xed, 0xfe, 0xeb, 0xd7, 0x3b, 0xd2, + 0xfb, 0xbf, 0x7a, 0xfa, 0x37, 0x1d, 0x3c, 0x57, + 0x6e, 0x97, 0x73, 0x38, 0xfb, 0xea, 0xbf, 0x70, + 0xad, 0x03, 0xff, 0xff, 0xcf, 0x05, 0x84, 0x62, + 0xe9, 0xbf, 0xfd, 0xbf, 0xbf, 0xf7, 0xfd, 0x77, + 0x0f, 0xff, 0xef, 0x6f, 0xff, 0xfb, 0xdf, 0xe2, + 0xc9, 0xf8, 0x7f, 0x7e, 0x4d, 0xb8, 0x0a, 0x6a, + 0x7c, 0xea, 0xe3, 0xfa, 0x7a, 0xbf, 0x67, 0xff, + // Entry 40 - 7F + 0xff, 0xff, 0xff, 0xdf, 0x2a, 0x54, 0x91, 0xc0, + 0x5d, 0xe3, 0x97, 0x14, 0x07, 0x20, 0xdd, 0xed, + 0x9f, 0x3f, 0xc9, 0x21, 0xf8, 0x3f, 0x94, 0x35, + 0x7c, 0x5f, 0xff, 0x5f, 0x8e, 0x6e, 0xdf, 0xff, + 0xff, 0xff, 0x55, 0x7c, 0xd3, 0xfd, 0xbf, 0xb5, + 0x7b, 0xdf, 0x7f, 0xf7, 0xca, 0xfe, 0xdb, 0xa3, + 0xa8, 0xff, 0x1f, 0x67, 0x7f, 0xeb, 0xef, 0xce, + 0xff, 0xff, 0x9f, 0xff, 0xb7, 0xef, 0xfe, 0xcf, + // Entry 80 - BF + 0xdb, 0xff, 0xf3, 0xcd, 0xfb, 0x2f, 0xff, 0xff, + 0xbb, 0xee, 0xf7, 0xbd, 0xdb, 0xff, 0x5f, 0xf7, + 0xfd, 0xf2, 0xfd, 0xff, 0x5e, 0x2f, 0x3b, 0xba, + 0x7e, 0xff, 0xff, 0xfe, 0xf7, 0xff, 0xdd, 0xff, + 0xfd, 0xdf, 0xfb, 0xfe, 0x9d, 0xb4, 0xd3, 0xff, + 0xef, 0xff, 0xdf, 0xf7, 0x7f, 0xb7, 0xfd, 0xd5, + 0xa5, 0x77, 0x40, 0xff, 0x9c, 0xc1, 0x41, 0x2c, + 0x08, 0x20, 0x41, 0x00, 0x50, 0x40, 0x00, 0x80, + // Entry C0 - FF + 0xfb, 0x4a, 0xf2, 0x9f, 0xb4, 0x42, 0x41, 0x96, + 0x1b, 0x14, 0x08, 0xf2, 0x2b, 0xe7, 0x17, 0x56, + 0x45, 0x7d, 0x0e, 0x1c, 0x37, 0x71, 0xf3, 0xef, + 0x97, 0xff, 0x5d, 0x38, 0x64, 0x08, 0x00, 0x10, + 0xbc, 0x87, 0xaf, 0xdf, 0xff, 0xf7, 0x73, 0x35, + 0x3e, 0x87, 0xc7, 0xdf, 0xff, 0x00, 0x81, 0x00, + 0xb0, 0x05, 0x80, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x40, 0x00, 0x40, 0x92, 0x21, 0x50, 0xb1, 0x5d, + // Entry 100 - 13F + 0xfd, 0xdc, 0xbe, 0x5e, 0x00, 0x00, 0x02, 0x64, + 0x0d, 0x19, 0x41, 0xdf, 0x79, 0x22, 0x00, 0x00, + 0x00, 0x5e, 0x64, 0xdc, 0x24, 0xe5, 0xd9, 0xe3, + 0xfe, 0xff, 0xfd, 0xcb, 0x9f, 0x14, 0x01, 0x0c, + 0x86, 0x00, 0xd1, 0x00, 0xf0, 0xc5, 0x67, 0x5f, + 0x56, 0x89, 0x5e, 0xb5, 0x6c, 0xaf, 0x03, 0x00, + 0x02, 0x00, 0x00, 0x00, 0xc0, 0x37, 0xda, 0x56, + 0x90, 0x69, 0x01, 0x2c, 0x96, 0x69, 0x20, 0xfb, + // Entry 140 - 17F + 0xff, 0x3f, 0x00, 0x00, 0x00, 0x01, 0x08, 0x16, + 0x01, 0x00, 0x00, 0xb0, 0x14, 0x03, 0x50, 0x06, + 0x0a, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x09, + 0x00, 0x00, 0x60, 0x10, 0x00, 0x00, 0x00, 0x10, + 0x00, 0x00, 0x44, 0x00, 0x00, 0x10, 0x00, 0x04, + 0x08, 0x00, 0x00, 0x04, 0x00, 0x80, 0x28, 0x04, + 0x00, 0x00, 0x50, 0xd5, 0x2d, 0x00, 0x64, 0x35, + 0x24, 0x52, 0xf4, 0xd4, 0xbd, 0x62, 0xc9, 0x03, + // Entry 180 - 1BF + 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x13, 0x39, 0x01, 0xdd, 0x57, 0x98, + 0x21, 0x18, 0x81, 0x00, 0x00, 0x01, 0x40, 0x82, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x40, 0x00, 0x44, 0x00, 0x00, 0x80, 0xea, + 0xa9, 0x39, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x01, 0x28, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x20, 0x04, 0xa6, 0x00, 0x04, 0x00, 0x00, + 0x81, 0x50, 0x00, 0x00, 0x00, 0x11, 0x84, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x55, + 0x02, 0x10, 0x08, 0x04, 0x00, 0x00, 0x00, 0x40, + 0x30, 0x83, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x1e, 0xcd, 0xbf, 0x7e, 0xbf, + // Entry 200 - 23F + 0xdf, 0xc3, 0x83, 0x82, 0xc0, 0xfb, 0x57, 0x27, + 0xcd, 0x55, 0xe7, 0x01, 0x00, 0x20, 0xb2, 0xc5, + 
0xa4, 0x45, 0x25, 0x9b, 0x02, 0xcf, 0xe0, 0xdf, + 0x03, 0x44, 0x08, 0x10, 0x01, 0x04, 0x01, 0xe3, + 0x92, 0x54, 0xdb, 0x28, 0xd1, 0x5f, 0xf6, 0x6d, + 0x79, 0xed, 0x1c, 0x7d, 0x04, 0x08, 0x00, 0x01, + 0x21, 0x12, 0x6c, 0x5f, 0xdd, 0x0e, 0x85, 0x4f, + 0x40, 0x40, 0x00, 0x04, 0xf1, 0xfd, 0x3d, 0x54, + // Entry 240 - 27F + 0xe8, 0x03, 0xb4, 0x27, 0x23, 0x0d, 0x00, 0x00, + 0x20, 0x7b, 0x38, 0x02, 0x05, 0x84, 0x00, 0xf0, + 0xbb, 0x7e, 0x5a, 0x00, 0x18, 0x04, 0x81, 0x00, + 0x00, 0x00, 0x80, 0x10, 0x90, 0x1c, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x00, 0x04, + 0x08, 0xa0, 0x70, 0xa5, 0x0c, 0x40, 0x00, 0x00, + 0x11, 0x04, 0x04, 0x68, 0x00, 0x20, 0x70, 0xff, + 0x7b, 0x7f, 0x60, 0x00, 0x05, 0x9b, 0xdd, 0x66, + // Entry 280 - 2BF + 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x40, 0x05, + 0xb5, 0xb6, 0x80, 0x08, 0x04, 0x00, 0x04, 0x51, + 0xe2, 0xef, 0xfd, 0x3f, 0x05, 0x09, 0x08, 0x05, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x60, + 0xe5, 0x48, 0x00, 0x81, 0x20, 0xc0, 0x05, 0x80, + 0x03, 0x00, 0x00, 0x00, 0xcc, 0x50, 0x40, 0x04, + 0x84, 0x47, 0x84, 0x40, 0x20, 0x10, 0x00, 0x20, + // Entry 2C0 - 2FF + 0x02, 0x50, 0x80, 0x11, 0x00, 0x91, 0x6c, 0xe2, + 0x50, 0x27, 0x1d, 0x11, 0x29, 0x06, 0x59, 0xe9, + 0x33, 0x08, 0x00, 0x20, 0x04, 0x40, 0x10, 0x00, + 0x00, 0x00, 0x50, 0x44, 0x92, 0x49, 0xd6, 0x5d, + 0xa7, 0x81, 0x47, 0x97, 0xfb, 0x00, 0x10, 0x00, + 0x08, 0x00, 0x80, 0x00, 0x40, 0x04, 0x00, 0x01, + 0x02, 0x00, 0x01, 0x40, 0x80, 0x00, 0x00, 0x08, + 0xd8, 0xeb, 0xf6, 0x39, 0xc4, 0x89, 0x12, 0x00, + // Entry 300 - 33F + 0x00, 0x0c, 0x04, 0x01, 0x20, 0x20, 0xdd, 0xa0, + 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, + 0x04, 0x10, 0xd0, 0x9d, 0x95, 0x13, 0x04, 0x80, + 0x00, 0x01, 0xd0, 0x12, 0x40, 0x00, 0x10, 0xb0, + 0x10, 0x62, 0x4c, 0xd2, 0x02, 0x01, 0x4a, 0x00, + 0x46, 0x04, 0x00, 0x08, 0x02, 0x00, 0x20, 0x80, + 0x00, 0x80, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0xf0, 0xd8, 0x6f, 0x15, 0x02, 0x08, 0x00, + // Entry 340 - 37F + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01, + 0x00, 0x10, 0x00, 0x00, 0x00, 0xf0, 0x84, 0xe3, + 0xdd, 0xbf, 0xf9, 0xf9, 0x3b, 0x7f, 0x7f, 0xdb, + 0xfd, 0xfc, 0xfe, 0xdf, 0xff, 0xfd, 0xff, 0xf6, + 0xfb, 0xfc, 0xf7, 0x1f, 0xff, 0xb3, 0x6c, 0xff, + 0xd9, 0xad, 0xdf, 0xfe, 0xef, 0xba, 0xdf, 0xff, + 0xff, 0xff, 0xb7, 0xdd, 0x7d, 0xbf, 0xab, 0xff, + 0xfd, 0xfd, 0xdf, 0x2f, 0x9c, 0xdf, 0xf3, 0x6f, + // Entry 380 - 3BF + 0xdf, 0xdd, 0xff, 0xfb, 0xee, 0xd2, 0xab, 0x5f, + 0xd5, 0xdf, 0x7f, 0xff, 0xeb, 0xff, 0xe4, 0x4d, + 0xf9, 0xff, 0xfe, 0xf7, 0xfd, 0xdf, 0xfb, 0xbf, + 0xee, 0xdb, 0x6f, 0xef, 0xff, 0x7f, 0xff, 0xff, + 0xf7, 0x5f, 0xd3, 0x3b, 0xfd, 0xd9, 0xdf, 0xeb, + 0xbc, 0x08, 0x05, 0x24, 0xff, 0x07, 0x70, 0xfe, + 0xe6, 0x5e, 0x00, 0x08, 0x00, 0x83, 0x3d, 0x1b, + 0x06, 0xe6, 0x72, 0x60, 0xd1, 0x3c, 0x7f, 0x44, + // Entry 3C0 - 3FF + 0x02, 0x30, 0x9f, 0x7a, 0x16, 0xbd, 0x7f, 0x57, + 0xf2, 0xff, 0x31, 0xff, 0xf2, 0x1e, 0x90, 0xf7, + 0xf1, 0xf9, 0x45, 0x80, 0x01, 0x02, 0x00, 0x00, + 0x40, 0x54, 0x9f, 0x8a, 0xd9, 0xd9, 0x0e, 0x11, + 0x84, 0x51, 0xc0, 0xf3, 0xfb, 0x47, 0x00, 0x01, + 0x05, 0xd1, 0x50, 0x58, 0x00, 0x00, 0x00, 0x10, + 0x04, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x17, 0xd2, + 0xb9, 0xfd, 0xfc, 0xba, 0xfe, 0xef, 0xc7, 0xbe, + // Entry 400 - 43F + 0x53, 0x6f, 0xdf, 0xe7, 0xdb, 0x65, 0xbb, 0x7f, + 0xfa, 0xff, 0x77, 0xf3, 0xef, 0xbf, 0xfd, 0xf7, + 0xdf, 0xdf, 0x9b, 0x7f, 0xff, 0xff, 0x7f, 0x6f, + 0xf7, 0xfb, 0xeb, 0xdf, 0xbc, 0xff, 0xbf, 0x6b, + 0x7b, 0xfb, 0xff, 0xce, 0x76, 0xbd, 0xf7, 0xf7, + 0xdf, 0xdc, 0xf7, 0xf7, 0xff, 0xdf, 
0xf3, 0xfe, + 0xef, 0xff, 0xff, 0xff, 0xb6, 0x7f, 0x7f, 0xde, + 0xf7, 0xb9, 0xeb, 0x77, 0xff, 0xfb, 0xbf, 0xdf, + // Entry 440 - 47F + 0xfd, 0xfe, 0xfb, 0xff, 0xfe, 0xeb, 0x1f, 0x7d, + 0x2f, 0xfd, 0xb6, 0xb5, 0xa5, 0xfc, 0xff, 0xfd, + 0x7f, 0x4e, 0xbf, 0x8e, 0xae, 0xff, 0xee, 0xdf, + 0x7f, 0xf7, 0x73, 0x02, 0x02, 0x04, 0xfc, 0xf7, + 0xff, 0xb7, 0xd7, 0xef, 0xfe, 0xcd, 0xf5, 0xce, + 0xe2, 0x8e, 0xe7, 0xbf, 0xb7, 0xff, 0x56, 0xbd, + 0xcd, 0xff, 0xfb, 0xff, 0xdf, 0xd7, 0xea, 0xff, + 0xe5, 0x5f, 0x6d, 0x0f, 0xa7, 0x51, 0x04, 0x44, + // Entry 480 - 4BF + 0x13, 0x50, 0x5d, 0xaf, 0xa6, 0xfd, 0x99, 0xfb, + 0x63, 0x1d, 0x53, 0xff, 0xef, 0xb7, 0x35, 0x20, + 0x14, 0x00, 0x55, 0x51, 0x82, 0x65, 0xf5, 0x41, + 0xe2, 0xff, 0xfc, 0xdf, 0x00, 0x05, 0xc5, 0x05, + 0x00, 0x22, 0x00, 0x74, 0x69, 0x10, 0x08, 0x04, + 0x41, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x51, 0x20, 0x05, 0x04, 0x01, 0x00, 0x00, + 0x06, 0x01, 0x20, 0x00, 0x18, 0x01, 0x92, 0xb1, + // Entry 4C0 - 4FF + 0xfd, 0x47, 0x49, 0x06, 0x95, 0x06, 0x57, 0xed, + 0xfb, 0x4c, 0x1c, 0x6b, 0x83, 0x04, 0x62, 0x40, + 0x00, 0x11, 0x42, 0x00, 0x00, 0x00, 0x54, 0x83, + 0xb8, 0x4f, 0x10, 0x8c, 0x89, 0x46, 0xde, 0xf7, + 0x13, 0x31, 0x00, 0x20, 0x00, 0x00, 0x00, 0x90, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x10, 0x00, + 0x01, 0x00, 0x00, 0xf0, 0x5b, 0xf4, 0xbe, 0x3d, + 0xba, 0xcf, 0xf7, 0xaf, 0x42, 0x04, 0x84, 0x41, + // Entry 500 - 53F + 0x30, 0xff, 0x79, 0x72, 0x04, 0x00, 0x00, 0x49, + 0x2d, 0x14, 0x27, 0x57, 0xed, 0xf1, 0x3f, 0xe7, + 0x3f, 0x00, 0x00, 0x02, 0xc6, 0xa0, 0x1e, 0xf8, + 0xbb, 0xff, 0xfd, 0xfb, 0xb7, 0xfd, 0xe5, 0xf7, + 0xfd, 0xfc, 0xd5, 0xed, 0x47, 0xf4, 0x7e, 0x10, + 0x01, 0x01, 0x84, 0x6d, 0xff, 0xf7, 0xdd, 0xf9, + 0x5b, 0x05, 0x86, 0xed, 0xf5, 0x77, 0xbd, 0x3c, + 0x00, 0x00, 0x00, 0x42, 0x71, 0x42, 0x00, 0x40, + // Entry 540 - 57F + 0x00, 0x00, 0x01, 0x43, 0x19, 0x00, 0x08, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // Entry 580 - 5BF + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xab, 0xbd, 0xe7, 0x57, 0xee, 0x13, 0x5d, + 0x09, 0xc1, 0x40, 0x21, 0xfa, 0x17, 0x01, 0x80, + 0x00, 0x00, 0x00, 0x00, 0xf0, 0xce, 0xfb, 0xbf, + 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x00, 0x30, 0x15, 0xa3, 0x10, 0x00, 0x00, 0x00, + 0x11, 0x04, 0x16, 0x00, 0x00, 0x02, 0x00, 0x81, + 0xa3, 0x01, 0x50, 0x00, 0x00, 0x83, 0x11, 0x40, + // Entry 5C0 - 5FF + 0x00, 0x00, 0x00, 0xf0, 0xdd, 0x7b, 0x3e, 0x02, + 0xaa, 0x10, 0x5d, 0x98, 0x52, 0x00, 0x80, 0x20, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x02, 0x02, + 0x19, 0x00, 0x10, 0x02, 0x10, 0x61, 0x5a, 0x9d, + 0x31, 0x00, 0x00, 0x00, 0x01, 0x10, 0x02, 0x20, + 0x00, 0x00, 0x01, 0x00, 0x42, 0x00, 0x20, 0x00, + 0x00, 0x1f, 0xdf, 0xf2, 0xb9, 0xff, 0xfd, 0x3f, + 0x1f, 0x18, 0xcf, 0x9c, 0xbf, 0xaf, 0x5f, 0xfe, + // Entry 600 - 63F + 0x7b, 0x4b, 0x40, 0x10, 0xe1, 0xfd, 0xaf, 0xd9, + 0xb7, 0xf6, 0xfb, 0xb3, 0xc7, 0xff, 0x6f, 0xf1, + 0x73, 0xb1, 0x7f, 0x9f, 0x7f, 0xbd, 0xfc, 0xb7, + 0xee, 0x1c, 0xfa, 0xcb, 0xef, 0xdd, 0xf9, 0xbd, + 0x6e, 0xae, 0x55, 0xfd, 0x6e, 0x81, 0x76, 0x1f, + 0xd4, 0x77, 0xf5, 0x7d, 0xfb, 0xff, 0xeb, 0xfe, + 0xbe, 0x5f, 0x46, 0x1b, 0xe9, 0x5f, 0x50, 0x18, + 0x02, 0xfa, 0xf7, 0x9d, 0x15, 0x97, 0x05, 0x0f, + // Entry 640 - 67F + 0x75, 0xc4, 0x7d, 0x81, 0x82, 0xf1, 0x57, 0x6c, + 
0xff, 0xe4, 0xef, 0x6f, 0xff, 0xfc, 0xdd, 0xde, + 0xfc, 0xfd, 0x76, 0x5f, 0x7a, 0x1f, 0x00, 0x98, + 0x02, 0xfb, 0xa3, 0xef, 0xf3, 0xd6, 0xf2, 0xff, + 0xb9, 0xda, 0x7d, 0x50, 0x1e, 0x15, 0x7b, 0xb4, + 0xf5, 0x3e, 0xff, 0xff, 0xf1, 0xf7, 0xff, 0xe7, + 0x5f, 0xff, 0xff, 0x9e, 0xdb, 0xf6, 0xd7, 0xb9, + 0xef, 0x27, 0x80, 0xbb, 0xc5, 0xff, 0xff, 0xe3, + // Entry 680 - 6BF + 0x97, 0x9d, 0xbf, 0x9f, 0xf7, 0xc7, 0xfd, 0x37, + 0xce, 0x7f, 0x04, 0x1d, 0x53, 0x7f, 0xf8, 0xda, + 0x5d, 0xce, 0x7d, 0x06, 0xb9, 0xea, 0x69, 0xa0, + 0x1a, 0x20, 0x00, 0x30, 0x02, 0x04, 0x24, 0x08, + 0x04, 0x00, 0x00, 0x40, 0xd4, 0x02, 0x04, 0x00, + 0x00, 0x04, 0x00, 0x04, 0x00, 0x20, 0x01, 0x06, + 0x50, 0x00, 0x08, 0x00, 0x00, 0x00, 0x24, 0x00, + 0x04, 0x00, 0x10, 0x8c, 0x58, 0xd5, 0x0d, 0x0f, + // Entry 6C0 - 6FF + 0x14, 0x4d, 0xf1, 0x16, 0x44, 0xd1, 0x42, 0x08, + 0x40, 0x00, 0x00, 0x40, 0x00, 0x08, 0x00, 0x00, + 0x00, 0xdc, 0xfb, 0xcb, 0x0e, 0x58, 0x08, 0x41, + 0x04, 0x20, 0x04, 0x00, 0x30, 0x12, 0x40, 0x00, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x80, 0x10, 0x10, 0xab, + 0x6d, 0x93, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x80, 0x80, 0x25, 0x00, 0x00, + // Entry 700 - 73F + 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, + 0x80, 0x86, 0xc2, 0x00, 0x00, 0x00, 0x00, 0x01, + 0xdf, 0x18, 0x00, 0x00, 0x02, 0xf0, 0xfd, 0x79, + 0x3b, 0x00, 0x25, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, + 0x03, 0x00, 0x09, 0x20, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 740 - 77F + 0x00, 0x00, 0x00, 0xef, 0xd5, 0xfd, 0xcf, 0x7e, + 0xa0, 0x11, 0x00, 0x00, 0x00, 0x92, 0x01, 0x44, + 0xcd, 0xf9, 0x5c, 0x00, 0x01, 0x00, 0x30, 0x04, + 0x04, 0x55, 0x00, 0x01, 0x04, 0xf4, 0x3f, 0x4a, + 0x01, 0x00, 0x00, 0xb0, 0x80, 0x00, 0x55, 0x55, + 0x97, 0x7c, 0x9f, 0x31, 0xcc, 0x68, 0xd1, 0x03, + 0xd5, 0x57, 0x27, 0x14, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x2c, 0xf7, 0xcb, 0x1f, 0x14, 0x60, + // Entry 780 - 7BF + 0x03, 0x68, 0x01, 0x10, 0x8b, 0x38, 0x8a, 0x01, + 0x00, 0x00, 0x20, 0x00, 0x24, 0x44, 0x00, 0x00, + 0x10, 0x03, 0x11, 0x02, 0x01, 0x00, 0x00, 0xf0, + 0xf5, 0xff, 0xd5, 0x97, 0xbc, 0x70, 0xd6, 0x78, + 0x78, 0x15, 0x50, 0x00, 0xa4, 0x84, 0xa9, 0x41, + 0x00, 0x00, 0x00, 0x6b, 0x39, 0x52, 0x74, 0x00, + 0xe8, 0x30, 0x90, 0x6a, 0x92, 0x00, 0x00, 0x02, + 0xff, 0xef, 0xff, 0x4b, 0x85, 0x53, 0xf4, 0xed, + // Entry 7C0 - 7FF + 0xdd, 0xbf, 0x72, 0x19, 0xc7, 0x0c, 0xd5, 0x42, + 0x54, 0xdd, 0x77, 0x14, 0x00, 0x80, 0x40, 0x56, + 0xcc, 0x16, 0x9e, 0xea, 0x35, 0x7d, 0xef, 0xff, + 0xbd, 0xa4, 0xaf, 0x01, 0x44, 0x18, 0x01, 0x4d, + 0x4e, 0x4a, 0x08, 0x50, 0x28, 0x30, 0xe0, 0x80, + 0x10, 0x20, 0x24, 0x00, 0xff, 0x2f, 0xd3, 0x60, + 0xfe, 0x01, 0x02, 0x88, 0x0a, 0x40, 0x16, 0x01, + 0x01, 0x15, 0x2b, 0x3c, 0x01, 0x00, 0x00, 0x10, + // Entry 800 - 83F + 0x90, 0x49, 0x41, 0x02, 0x02, 0x01, 0xe1, 0xbf, + 0xbf, 0x03, 0x00, 0x00, 0x10, 0xd4, 0xa3, 0xd1, + 0x40, 0x9c, 0x44, 0xdf, 0xf5, 0x8f, 0x66, 0xb3, + 0x55, 0x20, 0xd4, 0xc1, 0xd8, 0x30, 0x3d, 0x80, + 0x00, 0x00, 0x00, 0x04, 0xd4, 0x11, 0xc5, 0x84, + 0x2e, 0x50, 0x00, 0x22, 0x50, 0x6e, 0xbd, 0x93, + 0x07, 0x00, 0x20, 0x10, 0x84, 0xb2, 0x45, 0x10, + 0x06, 0x44, 0x00, 0x00, 0x12, 0x02, 0x11, 0x00, + // Entry 840 - 87F + 0xf0, 0xfb, 0xfd, 0x3f, 0x05, 0x00, 0x12, 0x81, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x30, 0x02, 0x28, + 0x84, 0x00, 0x23, 0xc0, 0x23, 0x24, 0x00, 0x00, + 0x00, 0xcb, 0xe4, 0x3a, 0x42, 0x88, 
0x14, 0xf1, + 0xef, 0xff, 0x7f, 0x12, 0x01, 0x01, 0x84, 0x50, + 0x07, 0xfc, 0xff, 0xff, 0x0f, 0x01, 0x00, 0x40, + 0x10, 0x38, 0x01, 0x01, 0x1c, 0x12, 0x40, 0xe1, + // Entry 880 - 8BF + 0x76, 0x16, 0x08, 0x03, 0x10, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x24, + 0x0a, 0x00, 0x80, 0x00, 0x00, +} + +// altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives +// to 2-letter language codes that cannot be derived using the method described above. +// Each 3-letter code is followed by its 1-byte langID. +const altLangISO3 tag.Index = "---\x00cor\x00hbs\x01heb\x02kin\x03spa\x04yid\x05\xff\xff\xff\xff" + +// altLangIndex is used to convert indexes in altLangISO3 to langIDs. +// Size: 12 bytes, 6 elements +var altLangIndex = [6]uint16{ + 0x0278, 0x03fd, 0x01f3, 0x03dc, 0x0139, 0x0200, +} + +// langAliasMap maps langIDs to their suggested replacements. +// Size: 644 bytes, 161 elements +var langAliasMap = [161]fromTo{ + 0: {from: 0x81, to: 0x87}, + 1: {from: 0x181, to: 0x1a7}, + 2: {from: 0x1eb, to: 0x1da}, + 3: {from: 0x1f3, to: 0x1b5}, + 4: {from: 0x200, to: 0x508}, + 5: {from: 0x207, to: 0x206}, + 6: {from: 0x307, to: 0x3d3}, + 7: {from: 0x33e, to: 0x366}, + 8: {from: 0x3fd, to: 0x428}, + 9: {from: 0x470, to: 0x14e}, + 10: {from: 0x486, to: 0x447}, + 11: {from: 0x498, to: 0x20}, + 12: {from: 0x533, to: 0x539}, + 13: {from: 0x584, to: 0x129}, + 14: {from: 0x625, to: 0x1ea6}, + 15: {from: 0x646, to: 0x427}, + 16: {from: 0x657, to: 0x427}, + 17: {from: 0x6e2, to: 0x39}, + 18: {from: 0x6ed, to: 0x1d0}, + 19: {from: 0x733, to: 0x2196}, + 20: {from: 0x7a8, to: 0x55}, + 21: {from: 0x7ae, to: 0x2990}, + 22: {from: 0x7ba, to: 0x57}, + 23: {from: 0x7db, to: 0x140}, + 24: {from: 0x801, to: 0x59}, + 25: {from: 0x80a, to: 0x8c}, + 26: {from: 0x873, to: 0x805}, + 27: {from: 0x8b8, to: 0xed8}, + 28: {from: 0x9e4, to: 0x328}, + 29: {from: 0xa2b, to: 0x2bc}, + 30: {from: 0xa32, to: 0xbd}, + 31: {from: 0xab3, to: 0x3317}, + 32: {from: 0xb2d, to: 0x51f}, + 33: {from: 0xb6a, to: 0x264f}, + 34: {from: 0xb73, to: 0xbb8}, + 35: {from: 0xb90, to: 0x444}, + 36: {from: 0xbb1, to: 0x421e}, + 37: {from: 0xbb4, to: 0x51f}, + 38: {from: 0xbf3, to: 0x2d9c}, + 39: {from: 0xc23, to: 0x3176}, + 40: {from: 0xcae, to: 0xf0}, + 41: {from: 0xcfd, to: 0xf6}, + 42: {from: 0xdbd, to: 0x116}, + 43: {from: 0xdcc, to: 0x324}, + 44: {from: 0xded, to: 0xdf0}, + 45: {from: 0xdf3, to: 0x526}, + 46: {from: 0xed4, to: 0x204f}, + 47: {from: 0xee3, to: 0x2e8f}, + 48: {from: 0xf2e, to: 0x35e}, + 49: {from: 0x10c5, to: 0x13b}, + 50: {from: 0x10f9, to: 0x2c7}, + 51: {from: 0x1195, to: 0x1e4}, + 52: {from: 0x126e, to: 0x20}, + 53: {from: 0x1419, to: 0x159}, + 54: {from: 0x1465, to: 0x149}, + 55: {from: 0x1514, to: 0xd90}, + 56: {from: 0x1518, to: 0x387}, + 57: {from: 0x1527, to: 0x16ba}, + 58: {from: 0x1575, to: 0x208}, + 59: {from: 0x1578, to: 0x109}, + 60: {from: 0x1598, to: 0x3ca4}, + 61: {from: 0x165f, to: 0x195}, + 62: {from: 0x16bd, to: 0x131}, + 63: {from: 0x16f5, to: 0x29ed}, + 64: {from: 0x170d, to: 0x18e}, + 65: {from: 0x171c, to: 0xf34}, + 66: {from: 0x176f, to: 0x1519}, + 67: {from: 0x17fe, to: 0x17ab}, + 68: {from: 0x180b, to: 0x18e8}, + 69: {from: 0x187f, to: 0x42c}, + 70: {from: 0x196e, to: 0x1cf6}, + 71: {from: 0x1a69, to: 0x2ba5}, + 72: {from: 0x1a7f, to: 0x1f0}, + 73: {from: 0x1b4f, to: 0x1f2}, + 74: {from: 0x1b7b, to: 0x150a}, + 75: {from: 0x202d, to: 0x37a6}, + 76: {from: 0x2032, to: 0x20d2}, + 77: {from: 0x204f, to: 0x302}, + 78: {from: 0x20d8, to: 0x26b}, + 79: {from: 0x20e3, 
to: 0x25a}, + 80: {from: 0x20e7, to: 0x225}, + 81: {from: 0x20ee, to: 0x24d}, + 82: {from: 0x2104, to: 0x21e0}, + 83: {from: 0x212a, to: 0x274}, + 84: {from: 0x218e, to: 0x11d}, + 85: {from: 0x21c3, to: 0x1556}, + 86: {from: 0x21db, to: 0x4fa}, + 87: {from: 0x21e9, to: 0x495}, + 88: {from: 0x2222, to: 0x11d}, + 89: {from: 0x222c, to: 0x11d}, + 90: {from: 0x2257, to: 0x91f}, + 91: {from: 0x230b, to: 0x321b}, + 92: {from: 0x2377, to: 0x335a}, + 93: {from: 0x2467, to: 0x2be}, + 94: {from: 0x24d9, to: 0x2f6}, + 95: {from: 0x24e5, to: 0x2f1}, + 96: {from: 0x24ef, to: 0x316}, + 97: {from: 0x2545, to: 0xb50}, + 98: {from: 0x259e, to: 0xe0}, + 99: {from: 0x2633, to: 0x2c7}, + 100: {from: 0x26be, to: 0x26a9}, + 101: {from: 0x26ee, to: 0x3bf}, + 102: {from: 0x271c, to: 0x3ca4}, + 103: {from: 0x275a, to: 0x26a9}, + 104: {from: 0x277e, to: 0x434d}, + 105: {from: 0x28e4, to: 0x282c}, + 106: {from: 0x2909, to: 0x348}, + 107: {from: 0x297b, to: 0x2d9c}, + 108: {from: 0x2b0f, to: 0x384}, + 109: {from: 0x2bf1, to: 0x38c}, + 110: {from: 0x2c34, to: 0x3ca4}, + 111: {from: 0x2cf1, to: 0x3b5}, + 112: {from: 0x2d08, to: 0x58c}, + 113: {from: 0x2d3c, to: 0x143}, + 114: {from: 0x2d3d, to: 0x143}, + 115: {from: 0x2df4, to: 0x2e8}, + 116: {from: 0x2dfd, to: 0x19c1}, + 117: {from: 0x2e0f, to: 0x2d8a}, + 118: {from: 0x2e16, to: 0x289}, + 119: {from: 0x2e49, to: 0x7c}, + 120: {from: 0x2e5a, to: 0x2277}, + 121: {from: 0x2e95, to: 0x2e90}, + 122: {from: 0x2ee4, to: 0x2ecc}, + 123: {from: 0x3188, to: 0x3bb}, + 124: {from: 0x335b, to: 0x3383}, + 125: {from: 0x341f, to: 0x3d3}, + 126: {from: 0x34e3, to: 0x18c5}, + 127: {from: 0x35db, to: 0x408}, + 128: {from: 0x364d, to: 0x23e}, + 129: {from: 0x366b, to: 0x3ea}, + 130: {from: 0x36f2, to: 0x43b}, + 131: {from: 0x37b5, to: 0x11d}, + 132: {from: 0x380b, to: 0x38e7}, + 133: {from: 0x3820, to: 0x2c90}, + 134: {from: 0x3824, to: 0xa7}, + 135: {from: 0x3827, to: 0x321d}, + 136: {from: 0x3861, to: 0x399b}, + 137: {from: 0x3887, to: 0x3fb5}, + 138: {from: 0x389a, to: 0x39cc}, + 139: {from: 0x38a9, to: 0x1f99}, + 140: {from: 0x38aa, to: 0x2e8f}, + 141: {from: 0x3951, to: 0x474}, + 142: {from: 0x3b43, to: 0xd86}, + 143: {from: 0x3b6d, to: 0x132}, + 144: {from: 0x3c8e, to: 0x4b2}, + 145: {from: 0x3fb2, to: 0xfc}, + 146: {from: 0x41fd, to: 0xa86}, + 147: {from: 0x42b3, to: 0x568}, + 148: {from: 0x42ee, to: 0x3f55}, + 149: {from: 0x436d, to: 0x251}, + 150: {from: 0x43c0, to: 0x36c0}, + 151: {from: 0x43c2, to: 0x10b}, + 152: {from: 0x44a4, to: 0x3317}, + 153: {from: 0x44d8, to: 0x508}, + 154: {from: 0x45bf, to: 0x23fe}, + 155: {from: 0x45d2, to: 0x26d1}, + 156: {from: 0x4605, to: 0x48a3}, + 157: {from: 0x46a3, to: 0x4695}, + 158: {from: 0x4733, to: 0x473a}, + 159: {from: 0x490b, to: 0x316}, + 160: {from: 0x499c, to: 0x519}, +} + +// Size: 161 bytes, 161 elements +var langAliasTypes = [161]langAliasType{ + // Entry 0 - 3F + 1, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 1, 0, 0, 1, 2, + 1, 1, 2, 0, 1, 0, 1, 2, 1, 1, 0, 0, 2, 1, 1, 0, + 2, 0, 0, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 1, 0, 0, + 2, 1, 1, 1, 1, 2, 1, 0, 1, 1, 2, 2, 0, 1, 2, 0, + // Entry 40 - 7F + 1, 0, 1, 1, 1, 1, 0, 0, 2, 1, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 2, 2, + 2, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, + 0, 2, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 2, 0, 2, + // Entry 80 - BF + 1, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 1, 2, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, + 1, +} + +const ( + _Latn = 82 + _Hani = 50 + _Hans = 52 + _Hant = 53 + _Qaaa = 131 + _Qaai = 139 + _Qabx = 180 + _Zinh = 224 + _Zyyy = 229 
+ _Zzzz = 230 +) + +// script is an alphabetically sorted list of ISO 15924 codes. The index +// of the script in the string, divided by 4, is the internal scriptID. +const script tag.Index = "" + // Size: 928 bytes + "----AdlmAfakAghbAhomArabAranArmiArmnAvstBaliBamuBassBatkBengBhksBlisBopo" + + "BrahBraiBugiBuhdCakmCansCariChamCherCirtCoptCprtCyrlCyrsDevaDsrtDuplEgyd" + + "EgyhEgypElbaEthiGeokGeorGlagGothGranGrekGujrGuruHanbHangHaniHanoHansHant" + + "HatrHebrHiraHluwHmngHrktHungIndsItalJamoJavaJpanJurcKaliKanaKharKhmrKhoj" + + "KitlKitsKndaKoreKpelKthiLanaLaooLatfLatgLatnLekeLepcLimbLinaLinbLisuLoma" + + "LyciLydiMahjMandManiMarcMayaMendMercMeroMlymModiMongMoonMrooMteiMultMymr" + + "NarbNbatNewaNkgbNkooNshuOgamOlckOrkhOryaOsgeOsmaPalmPaucPermPhagPhliPhlp" + + "PhlvPhnxPiqdPlrdPrtiQaaaQaabQaacQaadQaaeQaafQaagQaahQaaiQaajQaakQaalQaam" + + "QaanQaaoQaapQaaqQaarQaasQaatQaauQaavQaawQaaxQaayQaazQabaQabbQabcQabdQabe" + + "QabfQabgQabhQabiQabjQabkQablQabmQabnQaboQabpQabqQabrQabsQabtQabuQabvQabw" + + "QabxRjngRoroRunrSamrSaraSarbSaurSgnwShawShrdSiddSindSinhSoraSundSyloSyrc" + + "SyreSyrjSyrnTagbTakrTaleTaluTamlTangTavtTeluTengTfngTglgThaaThaiTibtTirh" + + "UgarVaiiVispWaraWoleXpeoXsuxYiiiZinhZmthZsyeZsymZxxxZyyyZzzz\xff\xff\xff" + + "\xff" + +// suppressScript is an index from langID to the dominant script for that language, +// if it exists. If a script is given, it should be suppressed from the language tag. +// Size: 1319 bytes, 1319 elements +var suppressScript = [1319]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 40 - 7F + 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, + // Entry 80 - BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry C0 - FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x52, 0x00, 0x00, + // Entry 100 - 13F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00, + 0x00, 0xd6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x2d, 0x00, 0x00, 0x52, 0x00, 0x00, 0x52, + 0x00, 
0x52, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, + // Entry 140 - 17F + 0x52, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x52, 0x52, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x52, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x2e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x37, 0x00, 0x20, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x52, 0x00, 0x52, 0x52, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x52, 0x00, 0x37, 0x00, 0x00, 0x00, 0x00, + 0x41, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x1e, 0x00, 0x00, 0x52, 0x00, 0x00, + // Entry 240 - 27F + 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x4a, 0x00, 0x4b, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 280 - 2BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x4f, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + // Entry 2C0 - 2FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, + 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x52, 0x52, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + // Entry 340 - 37F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x52, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x70, 0x52, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + // Entry 380 - 3BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x00, 0x00, 0x00, 0x00, 0x75, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x52, + 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, + // Entry 3C0 - 3FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x1e, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 400 - 43F + 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x52, 0x00, 0x00, 0x00, 0x00, + // Entry 440 - 47F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xd5, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x52, 0x00, 0x52, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + // Entry 480 - 4BF + 0x52, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 4C0 - 4FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 500 - 53F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, +} + +const ( + _001 = 1 + _419 = 30 + _BR = 64 + _CA = 72 + _ES = 109 + _GB = 122 + _MD = 187 + _PT = 237 + _UK = 305 + _US = 308 + _ZZ = 356 + _XA = 322 + _XC = 324 + _XK = 332 +) + +// isoRegionOffset needs to be added to the index of regionISO to obtain the regionID +// for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for +// the UN.M49 codes used for groups.) 
+const isoRegionOffset = 31 + +// regionTypes defines the status of a region for various standards. +// Size: 357 bytes, 357 elements +var regionTypes = [357]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 40 - 7F + 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x04, 0x00, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, + 0x04, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x04, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, + 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 80 - BF + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x00, 0x04, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry C0 - FF + 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, + 0x06, 0x06, 0x06, 0x00, 0x06, 0x04, 0x06, 0x06, + 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, + 0x06, 0x00, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + // Entry 100 - 13F + 0x05, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x02, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 140 - 17F + 0x00, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x04, 0x06, 0x06, 0x04, + 0x06, 0x06, 0x04, 0x06, 0x05, +} + +// regionISO holds a list of alphabetically sorted 2-letter ISO region codes. +// Each 2-letter codes is followed by two bytes with the following meaning: +// - [A-Z}{2}: the first letter of the 2-letter code plus these two +// letters form the 3-letter ISO code. +// - 0, n: index into altRegionISO3. 
+const regionISO tag.Index = "" + // Size: 1308 bytes + "AAAAACSCADNDAEREAFFGAGTGAIIAALLBAMRMANNTAOGOAQTAARRGASSMATUTAUUSAWBWAXLA" + + "AZZEBAIHBBRBBDGDBEELBFFABGGRBHHRBIDIBJENBLLMBMMUBNRNBOOLBQESBRRABSHSBTTN" + + "BUURBVVTBWWABYLRBZLZCAANCCCKCDODCFAFCGOGCHHECIIVCKOKCLHLCMMRCNHNCOOLCPPT" + + "CRRICS\x00\x00CTTECUUBCVPVCWUWCXXRCYYPCZZEDDDRDEEUDGGADJJIDKNKDMMADOOMDY" + + "HYDZZAEA ECCUEESTEGGYEHSHERRIESSPETTHEU\x00\x03EZ FIINFJJIFKLKFMSMFORO" + + "FQ\x00\x18FRRAFXXXGAABGBBRGDRDGEEOGFUFGGGYGHHAGIIBGLRLGMMBGNINGPLPGQNQGR" + + "RCGS\x00\x06GTTMGUUMGWNBGYUYHKKGHMMDHNNDHRRVHTTIHUUNHVVOIC IDDNIERLILSR" + + "IMMNINNDIOOTIQRQIRRNISSLITTAJEEYJMAMJOORJPPNJTTNKEENKGGZKHHMKIIRKM\x00" + + "\x09KNNAKP\x00\x0cKRORKWWTKY\x00\x0fKZAZLAAOLBBNLCCALIIELKKALRBRLSSOLTTU" + + "LUUXLVVALYBYMAARMCCOMDDAMENEMFAFMGDGMHHLMIIDMKKDMLLIMMMRMNNGMOACMPNPMQTQ" + + "MRRTMSSRMTLTMUUSMVDVMWWIMXEXMYYSMZOZNAAMNCCLNEERNFFKNGGANHHBNIICNLLDNOOR" + + "NPPLNQ\x00\x1eNRRUNTTZNUIUNZZLOMMNPAANPCCIPEERPFYFPGNGPHHLPKAKPLOLPM\x00" + + "\x12PNCNPRRIPSSEPTRTPUUSPWLWPYRYPZCZQAATQMMMQNNNQOOOQPPPQQQQQRRRQSSSQTTT" + + "QU\x00\x03QVVVQWWWQXXXQYYYQZZZREEURHHOROOURS\x00\x15RUUSRWWASAAUSBLBSCYC" + + "SDDNSEWESGGPSHHNSIVNSJJMSKVKSLLESMMRSNENSOOMSRURSSSDSTTPSUUNSVLVSXXMSYYR" + + "SZWZTAAATCCATDCDTF\x00\x18TGGOTHHATJJKTKKLTLLSTMKMTNUNTOONTPMPTRURTTTOTV" + + "UVTWWNTZZAUAKRUGGAUK UMMIUN USSAUYRYUZZBVAATVCCTVDDRVEENVGGBVIIRVNNMVU" + + "UTWFLFWKAKWSSMXAAAXBBBXCCCXDDDXEEEXFFFXGGGXHHHXIIIXJJJXKKKXLLLXMMMXNNNXO" + + "OOXPPPXQQQXRRRXSSSXTTTXUUUXVVVXWWWXXXXXYYYXZZZYDMDYEEMYT\x00\x1bYUUGZAAF" + + "ZMMBZRARZWWEZZZZ\xff\xff\xff\xff" + +// altRegionISO3 holds a list of 3-letter region codes that cannot be +// mapped to 2-letter codes using the default algorithm. This is a short list. +const altRegionISO3 string = "SCGQUUSGSCOMPRKCYMSPMSRBATFMYTATN" + +// altRegionIDs holds a list of regionIDs the positions of which match those +// of the 3-letter ISO codes in altRegionISO3. +// Size: 22 bytes, 11 elements +var altRegionIDs = [11]uint16{ + 0x0056, 0x006f, 0x0087, 0x00a7, 0x00a9, 0x00ac, 0x00e9, 0x0104, + 0x0120, 0x015e, 0x00db, +} + +// Size: 80 bytes, 20 elements +var regionOldMap = [20]fromTo{ + 0: {from: 0x43, to: 0xc3}, + 1: {from: 0x57, to: 0xa6}, + 2: {from: 0x5e, to: 0x5f}, + 3: {from: 0x65, to: 0x3a}, + 4: {from: 0x78, to: 0x77}, + 5: {from: 0x92, to: 0x36}, + 6: {from: 0xa2, to: 0x132}, + 7: {from: 0xc0, to: 0x132}, + 8: {from: 0xd6, to: 0x13e}, + 9: {from: 0xdb, to: 0x2a}, + 10: {from: 0xee, to: 0x132}, + 11: {from: 0xf1, to: 0xe1}, + 12: {from: 0xfb, to: 0x6f}, + 13: {from: 0x102, to: 0x163}, + 14: {from: 0x129, to: 0x125}, + 15: {from: 0x131, to: 0x7a}, + 16: {from: 0x139, to: 0x13d}, + 17: {from: 0x140, to: 0x132}, + 18: {from: 0x15c, to: 0x15d}, + 19: {from: 0x162, to: 0x4a}, +} + +// m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are +// codes indicating collections of regions. 
+// Size: 714 bytes, 357 elements +var m49 = [357]int16{ + // Entry 0 - 3F + 0, 1, 2, 3, 5, 9, 11, 13, + 14, 15, 17, 18, 19, 21, 29, 30, + 34, 35, 39, 53, 54, 57, 61, 142, + 143, 145, 150, 151, 154, 155, 419, 958, + 0, 20, 784, 4, 28, 660, 8, 51, + 530, 24, 10, 32, 16, 40, 36, 533, + 248, 31, 70, 52, 50, 56, 854, 100, + 48, 108, 204, 652, 60, 96, 68, 535, + // Entry 40 - 7F + 76, 44, 64, 104, 74, 72, 112, 84, + 124, 166, 180, 140, 178, 756, 384, 184, + 152, 120, 156, 170, 0, 188, 891, 296, + 192, 132, 531, 162, 196, 203, 278, 276, + 0, 262, 208, 212, 214, 204, 12, 0, + 218, 233, 818, 732, 232, 724, 231, 967, + 0, 246, 242, 238, 583, 234, 0, 250, + 249, 266, 826, 308, 268, 254, 831, 288, + // Entry 80 - BF + 292, 304, 270, 324, 312, 226, 300, 239, + 320, 316, 624, 328, 344, 334, 340, 191, + 332, 348, 854, 0, 360, 372, 376, 833, + 356, 86, 368, 364, 352, 380, 832, 388, + 400, 392, 581, 404, 417, 116, 296, 174, + 659, 408, 410, 414, 136, 398, 418, 422, + 662, 438, 144, 430, 426, 440, 442, 428, + 434, 504, 492, 498, 499, 663, 450, 584, + // Entry C0 - FF + 581, 807, 466, 104, 496, 446, 580, 474, + 478, 500, 470, 480, 462, 454, 484, 458, + 508, 516, 540, 562, 574, 566, 548, 558, + 528, 578, 524, 10, 520, 536, 570, 554, + 512, 591, 0, 604, 258, 598, 608, 586, + 616, 666, 612, 630, 275, 620, 581, 585, + 600, 591, 634, 959, 960, 961, 962, 963, + 964, 965, 966, 967, 968, 969, 970, 971, + // Entry 100 - 13F + 972, 638, 716, 642, 688, 643, 646, 682, + 90, 690, 729, 752, 702, 654, 705, 744, + 703, 694, 674, 686, 706, 740, 728, 678, + 810, 222, 534, 760, 748, 0, 796, 148, + 260, 768, 764, 762, 772, 626, 795, 788, + 776, 626, 792, 780, 798, 158, 834, 804, + 800, 826, 581, 0, 840, 858, 860, 336, + 670, 704, 862, 92, 850, 704, 548, 876, + // Entry 140 - 17F + 581, 882, 973, 974, 975, 976, 977, 978, + 979, 980, 981, 982, 983, 984, 985, 986, + 987, 988, 989, 990, 991, 992, 993, 994, + 995, 996, 997, 998, 720, 887, 175, 891, + 710, 894, 180, 716, 999, +} + +// m49Index gives indexes into fromM49 based on the three most significant bits +// of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in +// fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] +// for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. +// The region code is stored in the 9 lsb of the indexed value. +// Size: 18 bytes, 9 elements +var m49Index = [9]int16{ + 0, 59, 107, 142, 180, 219, 258, 290, + 332, +} + +// fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details. 
+// Size: 664 bytes, 332 elements +var fromM49 = [332]uint16{ + // Entry 0 - 3F + 0x0201, 0x0402, 0x0603, 0x0823, 0x0a04, 0x1026, 0x1205, 0x142a, + 0x1606, 0x1866, 0x1a07, 0x1c08, 0x1e09, 0x202c, 0x220a, 0x240b, + 0x260c, 0x2821, 0x2a0d, 0x3029, 0x3824, 0x3a0e, 0x3c0f, 0x3e31, + 0x402b, 0x4410, 0x4611, 0x482e, 0x4e12, 0x502d, 0x5841, 0x6038, + 0x6434, 0x6627, 0x6833, 0x6a13, 0x6c14, 0x7035, 0x7215, 0x783c, + 0x7a16, 0x8042, 0x883e, 0x8c32, 0x9045, 0x9444, 0x9840, 0xa847, + 0xac99, 0xb508, 0xb93b, 0xc03d, 0xc837, 0xd0c3, 0xd839, 0xe046, + 0xe8a5, 0xf051, 0xf848, 0x0859, 0x10ac, 0x184b, 0x1c17, 0x1e18, + // Entry 40 - 7F + 0x20b2, 0x2219, 0x291f, 0x2c1a, 0x2e1b, 0x3050, 0x341c, 0x361d, + 0x3852, 0x3d2d, 0x445b, 0x4c49, 0x5453, 0x5ca7, 0x5f5e, 0x644c, + 0x684a, 0x704f, 0x7855, 0x7e8f, 0x8058, 0x885c, 0x965d, 0x983a, + 0xa062, 0xa863, 0xac64, 0xb468, 0xbd19, 0xc485, 0xcc6e, 0xce6e, + 0xd06c, 0xd269, 0xd475, 0xdc73, 0xde87, 0xe472, 0xec71, 0xf030, + 0xf278, 0xf477, 0xfc7d, 0x04e4, 0x0920, 0x0c61, 0x1479, 0x187c, + 0x1c82, 0x26ec, 0x285f, 0x2c5e, 0x305f, 0x407f, 0x4880, 0x50a6, + 0x5886, 0x6081, 0x687b, 0x7084, 0x7889, 0x8088, 0x8883, 0x908b, + // Entry 80 - BF + 0x9890, 0x9c8d, 0xa137, 0xa88e, 0xb08c, 0xb891, 0xc09c, 0xc898, + 0xd094, 0xd89b, 0xe09a, 0xe895, 0xf096, 0xf89d, 0x004e, 0x089f, + 0x10a1, 0x1cad, 0x20a0, 0x28a3, 0x30a9, 0x34aa, 0x3cab, 0x42a4, + 0x44ae, 0x461e, 0x4caf, 0x54b4, 0x58b7, 0x5cb3, 0x64b8, 0x6cb1, + 0x70b5, 0x74b6, 0x7cc5, 0x84be, 0x8ccd, 0x94cf, 0x9ccc, 0xa4c2, + 0xacca, 0xb4c7, 0xbcc8, 0xc0cb, 0xc8ce, 0xd8ba, 0xe0c4, 0xe4bb, + 0xe6bc, 0xe8c9, 0xf0b9, 0xf8d0, 0x00e0, 0x08d1, 0x10dc, 0x18da, + 0x20d8, 0x2428, 0x265a, 0x2a2f, 0x2d1a, 0x2e3f, 0x30dd, 0x38d2, + // Entry C0 - FF + 0x493e, 0x54df, 0x5cd7, 0x64d3, 0x6cd5, 0x74de, 0x7cd4, 0x84d9, + 0x88c6, 0x8b32, 0x8e74, 0x90bf, 0x92ef, 0x94e7, 0x9ee1, 0xace5, + 0xb0f0, 0xb8e3, 0xc0e6, 0xc8ea, 0xd0e8, 0xd8ed, 0xe08a, 0xe525, + 0xeceb, 0xf4f2, 0xfd01, 0x0503, 0x0705, 0x0d06, 0x183b, 0x1d0d, + 0x26a8, 0x2825, 0x2cb0, 0x2ebd, 0x34e9, 0x3d38, 0x4512, 0x4d17, + 0x5507, 0x5d13, 0x6104, 0x6509, 0x6d11, 0x7d0c, 0x7f10, 0x813d, + 0x830e, 0x8514, 0x8d60, 0x9963, 0xa15c, 0xa86d, 0xb116, 0xb30a, + 0xb86b, 0xc10a, 0xc915, 0xd10f, 0xd91c, 0xe10b, 0xe84d, 0xf11b, + // Entry 100 - 13F + 0xf523, 0xf922, 0x0121, 0x0924, 0x1128, 0x192b, 0x2022, 0x2927, + 0x312a, 0x3726, 0x391e, 0x3d2c, 0x4130, 0x492f, 0x4ec1, 0x5518, + 0x646a, 0x747a, 0x7e7e, 0x809e, 0x8297, 0x852e, 0x9134, 0xa53c, + 0xac36, 0xb535, 0xb936, 0xbd3a, 0xd93f, 0xe541, 0xed5d, 0xef5d, + 0xf656, 0xfd61, 0x7c1f, 0x7ef3, 0x80f4, 0x82f5, 0x84f6, 0x86f7, + 0x88f8, 0x8af9, 0x8cfa, 0x8e6f, 0x90fc, 0x92fd, 0x94fe, 0x96ff, + 0x9900, 0x9b42, 0x9d43, 0x9f44, 0xa145, 0xa346, 0xa547, 0xa748, + 0xa949, 0xab4a, 0xad4b, 0xaf4c, 0xb14d, 0xb34e, 0xb54f, 0xb750, + // Entry 140 - 17F + 0xb951, 0xbb52, 0xbd53, 0xbf54, 0xc155, 0xc356, 0xc557, 0xc758, + 0xc959, 0xcb5a, 0xcd5b, 0xcf64, +} + +// Size: 1463 bytes +var variantIndex = map[string]uint8{ + "1606nict": 0x0, + "1694acad": 0x1, + "1901": 0x2, + "1959acad": 0x3, + "1994": 0x45, + "1996": 0x4, + "abl1943": 0x5, + "alalc97": 0x47, + "aluku": 0x6, + "ao1990": 0x7, + "arevela": 0x8, + "arevmda": 0x9, + "baku1926": 0xa, + "balanka": 0xb, + "barla": 0xc, + "basiceng": 0xd, + "bauddha": 0xe, + "biscayan": 0xf, + "biske": 0x40, + "bohoric": 0x10, + "boont": 0x11, + "colb1945": 0x12, + "cornu": 0x13, + "dajnko": 0x14, + "ekavsk": 0x15, + "emodeng": 0x16, + "fonipa": 0x48, + "fonnapa": 0x49, + "fonupa": 0x4a, + "fonxsamp": 0x4b, + "hepburn": 0x17, + "heploc": 
0x46, + "hognorsk": 0x18, + "ijekavsk": 0x19, + "itihasa": 0x1a, + "jauer": 0x1b, + "jyutping": 0x1c, + "kkcor": 0x1d, + "kociewie": 0x1e, + "kscor": 0x1f, + "laukika": 0x20, + "lipaw": 0x41, + "luna1918": 0x21, + "metelko": 0x22, + "monoton": 0x23, + "ndyuka": 0x24, + "nedis": 0x25, + "newfound": 0x26, + "njiva": 0x42, + "nulik": 0x27, + "osojs": 0x43, + "oxendict": 0x28, + "pamaka": 0x29, + "petr1708": 0x2a, + "pinyin": 0x2b, + "polyton": 0x2c, + "puter": 0x2d, + "rigik": 0x2e, + "rozaj": 0x2f, + "rumgr": 0x30, + "scotland": 0x31, + "scouse": 0x32, + "simple": 0x4c, + "solba": 0x44, + "sotav": 0x33, + "surmiran": 0x34, + "sursilv": 0x35, + "sutsilv": 0x36, + "tarask": 0x37, + "uccor": 0x38, + "ucrcor": 0x39, + "ulster": 0x3a, + "unifon": 0x3b, + "vaidika": 0x3c, + "valencia": 0x3d, + "vallader": 0x3e, + "wadegile": 0x3f, +} + +// variantNumSpecialized is the number of specialized variants in variants. +const variantNumSpecialized = 71 + +// nRegionGroups is the number of region groups. +const nRegionGroups = 32 + +type likelyLangRegion struct { + lang uint16 + region uint16 +} + +// likelyScript is a lookup table, indexed by scriptID, for the most likely +// languages and regions given a script. +// Size: 928 bytes, 232 elements +var likelyScript = [232]likelyLangRegion{ + 1: {lang: 0x149, region: 0x83}, + 3: {lang: 0x299, region: 0x105}, + 4: {lang: 0x1e, region: 0x98}, + 5: {lang: 0x39, region: 0x6a}, + 7: {lang: 0x3a, region: 0x9b}, + 8: {lang: 0x1d0, region: 0x27}, + 9: {lang: 0x12, region: 0x9b}, + 10: {lang: 0x5a, region: 0x94}, + 11: {lang: 0x5f, region: 0x51}, + 12: {lang: 0xb7, region: 0xb3}, + 13: {lang: 0x62, region: 0x94}, + 14: {lang: 0xa3, region: 0x34}, + 15: {lang: 0x3e0, region: 0x98}, + 17: {lang: 0x51f, region: 0x12d}, + 18: {lang: 0x3a8, region: 0x98}, + 19: {lang: 0x159, region: 0x77}, + 20: {lang: 0xc0, region: 0x94}, + 21: {lang: 0x9b, region: 0xe6}, + 22: {lang: 0xd9, region: 0x34}, + 23: {lang: 0xf0, region: 0x48}, + 24: {lang: 0x4e6, region: 0x12a}, + 25: {lang: 0xe5, region: 0x13d}, + 26: {lang: 0xe3, region: 0x134}, + 28: {lang: 0xee, region: 0x6a}, + 29: {lang: 0x199, region: 0x5c}, + 30: {lang: 0x3d9, region: 0x105}, + 32: {lang: 0x1b7, region: 0x98}, + 34: {lang: 0x159, region: 0x77}, + 37: {lang: 0x12f, region: 0x6a}, + 38: {lang: 0x427, region: 0x26}, + 39: {lang: 0x26, region: 0x6e}, + 41: {lang: 0x208, region: 0x7c}, + 42: {lang: 0xfa, region: 0x37}, + 43: {lang: 0x198, region: 0x12f}, + 44: {lang: 0x3e0, region: 0x98}, + 45: {lang: 0x131, region: 0x86}, + 46: {lang: 0x19d, region: 0x98}, + 47: {lang: 0x394, region: 0x98}, + 48: {lang: 0x51f, region: 0x12d}, + 49: {lang: 0x24b, region: 0xaa}, + 50: {lang: 0x51f, region: 0x52}, + 51: {lang: 0x1c4, region: 0xe6}, + 52: {lang: 0x51f, region: 0x52}, + 53: {lang: 0x51f, region: 0x12d}, + 54: {lang: 0x2f4, region: 0x9a}, + 55: {lang: 0x1b5, region: 0x96}, + 56: {lang: 0x1f8, region: 0xa1}, + 57: {lang: 0x1be, region: 0x12a}, + 58: {lang: 0x1c3, region: 0xae}, + 60: {lang: 0x1ce, region: 0x91}, + 62: {lang: 0x13d, region: 0x9d}, + 63: {lang: 0x24b, region: 0xaa}, + 64: {lang: 0x206, region: 0x94}, + 65: {lang: 0x1f8, region: 0xa1}, + 67: {lang: 0x130, region: 0xc3}, + 68: {lang: 0x1f8, region: 0xa1}, + 69: {lang: 0x3b2, region: 0xe7}, + 70: {lang: 0x242, region: 0xa5}, + 71: {lang: 0x3f0, region: 0x98}, + 74: {lang: 0x249, region: 0x98}, + 75: {lang: 0x24b, region: 0xaa}, + 77: {lang: 0x87, region: 0x98}, + 78: {lang: 0x367, region: 0x122}, + 79: {lang: 0x2af, region: 0xae}, + 84: {lang: 0x296, region: 0x98}, 
+ 85: {lang: 0x29f, region: 0x98}, + 86: {lang: 0x286, region: 0x86}, + 87: {lang: 0x199, region: 0x86}, + 88: {lang: 0x2a3, region: 0x52}, + 90: {lang: 0x4ea, region: 0x12a}, + 91: {lang: 0x4eb, region: 0x12a}, + 92: {lang: 0x1b7, region: 0x98}, + 93: {lang: 0x32e, region: 0x9b}, + 94: {lang: 0x4ed, region: 0x52}, + 95: {lang: 0xa7, region: 0x52}, + 97: {lang: 0x2df, region: 0x111}, + 98: {lang: 0x4ee, region: 0x10a}, + 99: {lang: 0x4ee, region: 0x10a}, + 100: {lang: 0x2fb, region: 0x98}, + 101: {lang: 0x312, region: 0x98}, + 102: {lang: 0x302, region: 0x52}, + 104: {lang: 0x315, region: 0x34}, + 105: {lang: 0x305, region: 0x98}, + 106: {lang: 0x40a, region: 0xe7}, + 107: {lang: 0x328, region: 0xc3}, + 108: {lang: 0x4ef, region: 0x107}, + 109: {lang: 0x3a, region: 0xa0}, + 110: {lang: 0x34a, region: 0xda}, + 112: {lang: 0x2c7, region: 0x83}, + 114: {lang: 0x3f9, region: 0x95}, + 115: {lang: 0x3e5, region: 0x98}, + 116: {lang: 0x392, region: 0xc4}, + 117: {lang: 0x38c, region: 0x98}, + 118: {lang: 0x390, region: 0x134}, + 119: {lang: 0x41f, region: 0x114}, + 120: {lang: 0x3a, region: 0x11b}, + 121: {lang: 0xf9, region: 0xc3}, + 122: {lang: 0x274, region: 0x105}, + 123: {lang: 0x2c0, region: 0x52}, + 124: {lang: 0x396, region: 0x9b}, + 125: {lang: 0x396, region: 0x52}, + 127: {lang: 0x3a4, region: 0xaf}, + 129: {lang: 0x1bf, region: 0x52}, + 130: {lang: 0x4f3, region: 0x9b}, + 181: {lang: 0x3c2, region: 0x94}, + 183: {lang: 0x369, region: 0x10b}, + 184: {lang: 0x416, region: 0x96}, + 186: {lang: 0x4f5, region: 0x15d}, + 187: {lang: 0x3e6, region: 0x98}, + 188: {lang: 0x44, region: 0x134}, + 189: {lang: 0x134, region: 0x7a}, + 190: {lang: 0x3e0, region: 0x98}, + 191: {lang: 0x3e0, region: 0x98}, + 192: {lang: 0x3f0, region: 0x98}, + 193: {lang: 0x402, region: 0xb2}, + 194: {lang: 0x429, region: 0x98}, + 195: {lang: 0x434, region: 0x94}, + 196: {lang: 0x443, region: 0x34}, + 197: {lang: 0x444, region: 0x9a}, + 201: {lang: 0x450, region: 0xe6}, + 202: {lang: 0x116, region: 0x98}, + 203: {lang: 0x454, region: 0x52}, + 204: {lang: 0x22a, region: 0x52}, + 205: {lang: 0x446, region: 0x98}, + 206: {lang: 0x49b, region: 0x52}, + 207: {lang: 0x9d, region: 0x13d}, + 208: {lang: 0x457, region: 0x98}, + 210: {lang: 0x51e, region: 0xb9}, + 211: {lang: 0x14e, region: 0xe6}, + 212: {lang: 0x124, region: 0xcc}, + 213: {lang: 0x461, region: 0x122}, + 214: {lang: 0xa7, region: 0x52}, + 215: {lang: 0x2c5, region: 0x98}, + 216: {lang: 0x4a3, region: 0x11b}, + 217: {lang: 0x4b4, region: 0xb3}, + 219: {lang: 0x1c7, region: 0x98}, + 221: {lang: 0x3a0, region: 0x9b}, + 222: {lang: 0x21, region: 0x9a}, + 223: {lang: 0x1e2, region: 0x52}, +} + +type likelyScriptRegion struct { + region uint16 + script uint8 + flags uint8 +} + +// likelyLang is a lookup table, indexed by langID, for the most likely +// scripts and regions given incomplete information. If more entries exist for a +// given language, region and script are the index and size respectively +// of the list in likelyLangList. 
+// Size: 5276 bytes, 1319 elements +var likelyLang = [1319]likelyScriptRegion{ + 0: {region: 0x134, script: 0x52, flags: 0x0}, + 1: {region: 0x6e, script: 0x52, flags: 0x0}, + 2: {region: 0x164, script: 0x52, flags: 0x0}, + 3: {region: 0x164, script: 0x52, flags: 0x0}, + 4: {region: 0x164, script: 0x52, flags: 0x0}, + 5: {region: 0x7c, script: 0x1e, flags: 0x0}, + 6: {region: 0x164, script: 0x52, flags: 0x0}, + 7: {region: 0x7f, script: 0x52, flags: 0x0}, + 8: {region: 0x164, script: 0x52, flags: 0x0}, + 9: {region: 0x164, script: 0x52, flags: 0x0}, + 10: {region: 0x164, script: 0x52, flags: 0x0}, + 11: {region: 0x94, script: 0x52, flags: 0x0}, + 12: {region: 0x130, script: 0x52, flags: 0x0}, + 13: {region: 0x7f, script: 0x52, flags: 0x0}, + 14: {region: 0x164, script: 0x52, flags: 0x0}, + 15: {region: 0x164, script: 0x52, flags: 0x0}, + 16: {region: 0x105, script: 0x1e, flags: 0x0}, + 17: {region: 0x164, script: 0x52, flags: 0x0}, + 18: {region: 0x9b, script: 0x9, flags: 0x0}, + 19: {region: 0x127, script: 0x5, flags: 0x0}, + 20: {region: 0x164, script: 0x52, flags: 0x0}, + 21: {region: 0x160, script: 0x52, flags: 0x0}, + 22: {region: 0x164, script: 0x52, flags: 0x0}, + 23: {region: 0x164, script: 0x52, flags: 0x0}, + 24: {region: 0x164, script: 0x52, flags: 0x0}, + 25: {region: 0x164, script: 0x52, flags: 0x0}, + 26: {region: 0x164, script: 0x52, flags: 0x0}, + 27: {region: 0x51, script: 0x52, flags: 0x0}, + 28: {region: 0x164, script: 0x52, flags: 0x0}, + 29: {region: 0x164, script: 0x52, flags: 0x0}, + 30: {region: 0x98, script: 0x4, flags: 0x0}, + 31: {region: 0x164, script: 0x52, flags: 0x0}, + 32: {region: 0x7f, script: 0x52, flags: 0x0}, + 33: {region: 0x9a, script: 0xde, flags: 0x0}, + 34: {region: 0x164, script: 0x52, flags: 0x0}, + 35: {region: 0x164, script: 0x52, flags: 0x0}, + 36: {region: 0x14c, script: 0x52, flags: 0x0}, + 37: {region: 0x105, script: 0x1e, flags: 0x0}, + 38: {region: 0x6e, script: 0x27, flags: 0x0}, + 39: {region: 0x164, script: 0x52, flags: 0x0}, + 40: {region: 0x164, script: 0x52, flags: 0x0}, + 41: {region: 0xd5, script: 0x52, flags: 0x0}, + 42: {region: 0x164, script: 0x52, flags: 0x0}, + 44: {region: 0x164, script: 0x52, flags: 0x0}, + 45: {region: 0x164, script: 0x52, flags: 0x0}, + 46: {region: 0x164, script: 0x52, flags: 0x0}, + 47: {region: 0x164, script: 0x52, flags: 0x0}, + 48: {region: 0x164, script: 0x52, flags: 0x0}, + 49: {region: 0x164, script: 0x52, flags: 0x0}, + 50: {region: 0x94, script: 0x52, flags: 0x0}, + 51: {region: 0x164, script: 0x5, flags: 0x0}, + 52: {region: 0x121, script: 0x5, flags: 0x0}, + 53: {region: 0x164, script: 0x52, flags: 0x0}, + 54: {region: 0x164, script: 0x52, flags: 0x0}, + 55: {region: 0x164, script: 0x52, flags: 0x0}, + 56: {region: 0x164, script: 0x52, flags: 0x0}, + 57: {region: 0x6a, script: 0x5, flags: 0x0}, + 58: {region: 0x0, script: 0x3, flags: 0x1}, + 59: {region: 0x164, script: 0x52, flags: 0x0}, + 60: {region: 0x50, script: 0x52, flags: 0x0}, + 61: {region: 0x3e, script: 0x52, flags: 0x0}, + 62: {region: 0x66, script: 0x5, flags: 0x0}, + 64: {region: 0xb9, script: 0x5, flags: 0x0}, + 65: {region: 0x6a, script: 0x5, flags: 0x0}, + 66: {region: 0x98, script: 0xe, flags: 0x0}, + 67: {region: 0x12e, script: 0x52, flags: 0x0}, + 68: {region: 0x134, script: 0xbc, flags: 0x0}, + 69: {region: 0x164, script: 0x52, flags: 0x0}, + 70: {region: 0x164, script: 0x52, flags: 0x0}, + 71: {region: 0x6d, script: 0x52, flags: 0x0}, + 72: {region: 0x164, script: 0x52, flags: 0x0}, + 73: {region: 0x164, script: 0x52, 
flags: 0x0}, + 74: {region: 0x48, script: 0x52, flags: 0x0}, + 75: {region: 0x164, script: 0x52, flags: 0x0}, + 76: {region: 0x105, script: 0x1e, flags: 0x0}, + 77: {region: 0x164, script: 0x5, flags: 0x0}, + 78: {region: 0x164, script: 0x52, flags: 0x0}, + 79: {region: 0x164, script: 0x52, flags: 0x0}, + 80: {region: 0x164, script: 0x52, flags: 0x0}, + 81: {region: 0x98, script: 0x20, flags: 0x0}, + 82: {region: 0x164, script: 0x52, flags: 0x0}, + 83: {region: 0x164, script: 0x52, flags: 0x0}, + 84: {region: 0x164, script: 0x52, flags: 0x0}, + 85: {region: 0x3e, script: 0x52, flags: 0x0}, + 86: {region: 0x164, script: 0x52, flags: 0x0}, + 87: {region: 0x3, script: 0x5, flags: 0x1}, + 88: {region: 0x105, script: 0x1e, flags: 0x0}, + 89: {region: 0xe7, script: 0x5, flags: 0x0}, + 90: {region: 0x94, script: 0x52, flags: 0x0}, + 91: {region: 0xda, script: 0x20, flags: 0x0}, + 92: {region: 0x2d, script: 0x52, flags: 0x0}, + 93: {region: 0x51, script: 0x52, flags: 0x0}, + 94: {region: 0x164, script: 0x52, flags: 0x0}, + 95: {region: 0x51, script: 0xb, flags: 0x0}, + 96: {region: 0x164, script: 0x52, flags: 0x0}, + 97: {region: 0x164, script: 0x52, flags: 0x0}, + 98: {region: 0x94, script: 0x52, flags: 0x0}, + 99: {region: 0x164, script: 0x52, flags: 0x0}, + 100: {region: 0x51, script: 0x52, flags: 0x0}, + 101: {region: 0x164, script: 0x52, flags: 0x0}, + 102: {region: 0x164, script: 0x52, flags: 0x0}, + 103: {region: 0x164, script: 0x52, flags: 0x0}, + 104: {region: 0x164, script: 0x52, flags: 0x0}, + 105: {region: 0x4e, script: 0x52, flags: 0x0}, + 106: {region: 0x164, script: 0x52, flags: 0x0}, + 107: {region: 0x164, script: 0x52, flags: 0x0}, + 108: {region: 0x164, script: 0x52, flags: 0x0}, + 109: {region: 0x164, script: 0x27, flags: 0x0}, + 110: {region: 0x164, script: 0x52, flags: 0x0}, + 111: {region: 0x164, script: 0x52, flags: 0x0}, + 112: {region: 0x46, script: 0x1e, flags: 0x0}, + 113: {region: 0x164, script: 0x52, flags: 0x0}, + 114: {region: 0x164, script: 0x52, flags: 0x0}, + 115: {region: 0x10a, script: 0x5, flags: 0x0}, + 116: {region: 0x161, script: 0x52, flags: 0x0}, + 117: {region: 0x164, script: 0x52, flags: 0x0}, + 118: {region: 0x94, script: 0x52, flags: 0x0}, + 119: {region: 0x164, script: 0x52, flags: 0x0}, + 120: {region: 0x12e, script: 0x52, flags: 0x0}, + 121: {region: 0x51, script: 0x52, flags: 0x0}, + 122: {region: 0x98, script: 0xcd, flags: 0x0}, + 123: {region: 0xe7, script: 0x5, flags: 0x0}, + 124: {region: 0x98, script: 0x20, flags: 0x0}, + 125: {region: 0x37, script: 0x1e, flags: 0x0}, + 126: {region: 0x98, script: 0x20, flags: 0x0}, + 127: {region: 0xe7, script: 0x5, flags: 0x0}, + 128: {region: 0x12a, script: 0x2d, flags: 0x0}, + 130: {region: 0x98, script: 0x20, flags: 0x0}, + 131: {region: 0x164, script: 0x52, flags: 0x0}, + 132: {region: 0x98, script: 0x20, flags: 0x0}, + 133: {region: 0xe6, script: 0x52, flags: 0x0}, + 134: {region: 0x164, script: 0x52, flags: 0x0}, + 135: {region: 0x98, script: 0x20, flags: 0x0}, + 136: {region: 0x164, script: 0x52, flags: 0x0}, + 137: {region: 0x13e, script: 0x52, flags: 0x0}, + 138: {region: 0x164, script: 0x52, flags: 0x0}, + 139: {region: 0x164, script: 0x52, flags: 0x0}, + 140: {region: 0xe6, script: 0x52, flags: 0x0}, + 141: {region: 0x164, script: 0x52, flags: 0x0}, + 142: {region: 0xd5, script: 0x52, flags: 0x0}, + 143: {region: 0x164, script: 0x52, flags: 0x0}, + 144: {region: 0x164, script: 0x52, flags: 0x0}, + 145: {region: 0x164, script: 0x52, flags: 0x0}, + 146: {region: 0x164, script: 0x27, flags: 0x0}, + 
147: {region: 0x98, script: 0x20, flags: 0x0}, + 148: {region: 0x94, script: 0x52, flags: 0x0}, + 149: {region: 0x164, script: 0x52, flags: 0x0}, + 150: {region: 0x164, script: 0x52, flags: 0x0}, + 151: {region: 0x164, script: 0x52, flags: 0x0}, + 152: {region: 0x164, script: 0x52, flags: 0x0}, + 153: {region: 0x51, script: 0x52, flags: 0x0}, + 154: {region: 0x164, script: 0x52, flags: 0x0}, + 155: {region: 0xe6, script: 0x52, flags: 0x0}, + 156: {region: 0x164, script: 0x52, flags: 0x0}, + 157: {region: 0x13d, script: 0xcf, flags: 0x0}, + 158: {region: 0xc2, script: 0x52, flags: 0x0}, + 159: {region: 0x164, script: 0x52, flags: 0x0}, + 160: {region: 0x164, script: 0x52, flags: 0x0}, + 161: {region: 0xc2, script: 0x52, flags: 0x0}, + 162: {region: 0x164, script: 0x52, flags: 0x0}, + 163: {region: 0x34, script: 0xe, flags: 0x0}, + 164: {region: 0x164, script: 0x52, flags: 0x0}, + 165: {region: 0x164, script: 0x52, flags: 0x0}, + 166: {region: 0x164, script: 0x52, flags: 0x0}, + 167: {region: 0x52, script: 0xd6, flags: 0x0}, + 168: {region: 0x164, script: 0x52, flags: 0x0}, + 169: {region: 0x164, script: 0x52, flags: 0x0}, + 170: {region: 0x164, script: 0x52, flags: 0x0}, + 171: {region: 0x98, script: 0xe, flags: 0x0}, + 172: {region: 0x164, script: 0x52, flags: 0x0}, + 173: {region: 0x9b, script: 0x5, flags: 0x0}, + 174: {region: 0x164, script: 0x52, flags: 0x0}, + 175: {region: 0x4e, script: 0x52, flags: 0x0}, + 176: {region: 0x77, script: 0x52, flags: 0x0}, + 177: {region: 0x98, script: 0x20, flags: 0x0}, + 178: {region: 0xe7, script: 0x5, flags: 0x0}, + 179: {region: 0x98, script: 0x20, flags: 0x0}, + 180: {region: 0x164, script: 0x52, flags: 0x0}, + 181: {region: 0x32, script: 0x52, flags: 0x0}, + 182: {region: 0x164, script: 0x52, flags: 0x0}, + 183: {region: 0xb3, script: 0xc, flags: 0x0}, + 184: {region: 0x51, script: 0x52, flags: 0x0}, + 185: {region: 0x164, script: 0x27, flags: 0x0}, + 186: {region: 0xe6, script: 0x52, flags: 0x0}, + 187: {region: 0x164, script: 0x52, flags: 0x0}, + 188: {region: 0xe7, script: 0x20, flags: 0x0}, + 189: {region: 0x105, script: 0x1e, flags: 0x0}, + 190: {region: 0x15e, script: 0x52, flags: 0x0}, + 191: {region: 0x164, script: 0x52, flags: 0x0}, + 192: {region: 0x94, script: 0x52, flags: 0x0}, + 193: {region: 0x164, script: 0x52, flags: 0x0}, + 194: {region: 0x51, script: 0x52, flags: 0x0}, + 195: {region: 0x164, script: 0x52, flags: 0x0}, + 196: {region: 0x164, script: 0x52, flags: 0x0}, + 197: {region: 0x164, script: 0x52, flags: 0x0}, + 198: {region: 0x85, script: 0x52, flags: 0x0}, + 199: {region: 0x164, script: 0x52, flags: 0x0}, + 200: {region: 0x164, script: 0x52, flags: 0x0}, + 201: {region: 0x164, script: 0x52, flags: 0x0}, + 202: {region: 0x164, script: 0x52, flags: 0x0}, + 203: {region: 0x6c, script: 0x27, flags: 0x0}, + 204: {region: 0x164, script: 0x52, flags: 0x0}, + 205: {region: 0x164, script: 0x52, flags: 0x0}, + 206: {region: 0x51, script: 0x52, flags: 0x0}, + 207: {region: 0x164, script: 0x52, flags: 0x0}, + 208: {region: 0x164, script: 0x52, flags: 0x0}, + 209: {region: 0xc2, script: 0x52, flags: 0x0}, + 210: {region: 0x164, script: 0x52, flags: 0x0}, + 211: {region: 0x164, script: 0x52, flags: 0x0}, + 212: {region: 0x164, script: 0x52, flags: 0x0}, + 213: {region: 0x6d, script: 0x52, flags: 0x0}, + 214: {region: 0x164, script: 0x52, flags: 0x0}, + 215: {region: 0x164, script: 0x52, flags: 0x0}, + 216: {region: 0xd5, script: 0x52, flags: 0x0}, + 217: {region: 0x8, script: 0x2, flags: 0x1}, + 218: {region: 0x105, script: 0x1e, 
flags: 0x0}, + 219: {region: 0xe6, script: 0x52, flags: 0x0}, + 220: {region: 0x164, script: 0x52, flags: 0x0}, + 221: {region: 0x130, script: 0x52, flags: 0x0}, + 222: {region: 0x89, script: 0x52, flags: 0x0}, + 223: {region: 0x74, script: 0x52, flags: 0x0}, + 224: {region: 0x105, script: 0x1e, flags: 0x0}, + 225: {region: 0x134, script: 0x52, flags: 0x0}, + 226: {region: 0x48, script: 0x52, flags: 0x0}, + 227: {region: 0x134, script: 0x1a, flags: 0x0}, + 228: {region: 0xa5, script: 0x5, flags: 0x0}, + 229: {region: 0x13d, script: 0x19, flags: 0x0}, + 230: {region: 0x164, script: 0x52, flags: 0x0}, + 231: {region: 0x9a, script: 0x5, flags: 0x0}, + 232: {region: 0x164, script: 0x52, flags: 0x0}, + 233: {region: 0x164, script: 0x52, flags: 0x0}, + 234: {region: 0x164, script: 0x52, flags: 0x0}, + 235: {region: 0x164, script: 0x52, flags: 0x0}, + 236: {region: 0x164, script: 0x52, flags: 0x0}, + 237: {region: 0x77, script: 0x52, flags: 0x0}, + 238: {region: 0x6a, script: 0x1c, flags: 0x0}, + 239: {region: 0xe6, script: 0x52, flags: 0x0}, + 240: {region: 0x48, script: 0x17, flags: 0x0}, + 241: {region: 0x48, script: 0x17, flags: 0x0}, + 242: {region: 0x48, script: 0x17, flags: 0x0}, + 243: {region: 0x48, script: 0x17, flags: 0x0}, + 244: {region: 0x48, script: 0x17, flags: 0x0}, + 245: {region: 0x109, script: 0x52, flags: 0x0}, + 246: {region: 0x5d, script: 0x52, flags: 0x0}, + 247: {region: 0xe8, script: 0x52, flags: 0x0}, + 248: {region: 0x48, script: 0x17, flags: 0x0}, + 249: {region: 0xc3, script: 0x79, flags: 0x0}, + 250: {region: 0xa, script: 0x2, flags: 0x1}, + 251: {region: 0x105, script: 0x1e, flags: 0x0}, + 252: {region: 0x7a, script: 0x52, flags: 0x0}, + 253: {region: 0x62, script: 0x52, flags: 0x0}, + 254: {region: 0x164, script: 0x52, flags: 0x0}, + 255: {region: 0x164, script: 0x52, flags: 0x0}, + 256: {region: 0x164, script: 0x52, flags: 0x0}, + 257: {region: 0x164, script: 0x52, flags: 0x0}, + 258: {region: 0x134, script: 0x52, flags: 0x0}, + 259: {region: 0x105, script: 0x1e, flags: 0x0}, + 260: {region: 0xa3, script: 0x52, flags: 0x0}, + 261: {region: 0x164, script: 0x52, flags: 0x0}, + 262: {region: 0x164, script: 0x52, flags: 0x0}, + 263: {region: 0x98, script: 0x5, flags: 0x0}, + 264: {region: 0x164, script: 0x52, flags: 0x0}, + 265: {region: 0x5f, script: 0x52, flags: 0x0}, + 266: {region: 0x164, script: 0x52, flags: 0x0}, + 267: {region: 0x48, script: 0x52, flags: 0x0}, + 268: {region: 0x164, script: 0x52, flags: 0x0}, + 269: {region: 0x164, script: 0x52, flags: 0x0}, + 270: {region: 0x164, script: 0x52, flags: 0x0}, + 271: {region: 0x164, script: 0x5, flags: 0x0}, + 272: {region: 0x48, script: 0x52, flags: 0x0}, + 273: {region: 0x164, script: 0x52, flags: 0x0}, + 274: {region: 0x164, script: 0x52, flags: 0x0}, + 275: {region: 0xd3, script: 0x52, flags: 0x0}, + 276: {region: 0x4e, script: 0x52, flags: 0x0}, + 277: {region: 0x164, script: 0x52, flags: 0x0}, + 278: {region: 0x98, script: 0x5, flags: 0x0}, + 279: {region: 0x164, script: 0x52, flags: 0x0}, + 280: {region: 0x164, script: 0x52, flags: 0x0}, + 281: {region: 0x164, script: 0x52, flags: 0x0}, + 282: {region: 0x164, script: 0x27, flags: 0x0}, + 283: {region: 0x5f, script: 0x52, flags: 0x0}, + 284: {region: 0xc2, script: 0x52, flags: 0x0}, + 285: {region: 0xcf, script: 0x52, flags: 0x0}, + 286: {region: 0x164, script: 0x52, flags: 0x0}, + 287: {region: 0xda, script: 0x20, flags: 0x0}, + 288: {region: 0x51, script: 0x52, flags: 0x0}, + 289: {region: 0x164, script: 0x52, flags: 0x0}, + 290: {region: 0x164, script: 
0x52, flags: 0x0}, + 291: {region: 0x164, script: 0x52, flags: 0x0}, + 292: {region: 0xcc, script: 0xd4, flags: 0x0}, + 293: {region: 0x164, script: 0x52, flags: 0x0}, + 294: {region: 0x164, script: 0x52, flags: 0x0}, + 295: {region: 0x113, script: 0x52, flags: 0x0}, + 296: {region: 0x36, script: 0x52, flags: 0x0}, + 297: {region: 0x42, script: 0xd6, flags: 0x0}, + 298: {region: 0x164, script: 0x52, flags: 0x0}, + 299: {region: 0xa3, script: 0x52, flags: 0x0}, + 300: {region: 0x7f, script: 0x52, flags: 0x0}, + 301: {region: 0xd5, script: 0x52, flags: 0x0}, + 302: {region: 0x9d, script: 0x52, flags: 0x0}, + 303: {region: 0x6a, script: 0x25, flags: 0x0}, + 304: {region: 0xc3, script: 0x43, flags: 0x0}, + 305: {region: 0x86, script: 0x2d, flags: 0x0}, + 306: {region: 0x164, script: 0x52, flags: 0x0}, + 307: {region: 0x164, script: 0x52, flags: 0x0}, + 308: {region: 0xc, script: 0x2, flags: 0x1}, + 309: {region: 0x164, script: 0x52, flags: 0x0}, + 310: {region: 0x164, script: 0x52, flags: 0x0}, + 311: {region: 0x1, script: 0x52, flags: 0x0}, + 312: {region: 0x164, script: 0x52, flags: 0x0}, + 313: {region: 0x6d, script: 0x52, flags: 0x0}, + 314: {region: 0x134, script: 0x52, flags: 0x0}, + 315: {region: 0x69, script: 0x52, flags: 0x0}, + 316: {region: 0x164, script: 0x52, flags: 0x0}, + 317: {region: 0x9d, script: 0x3e, flags: 0x0}, + 318: {region: 0x164, script: 0x52, flags: 0x0}, + 319: {region: 0x164, script: 0x52, flags: 0x0}, + 320: {region: 0x6d, script: 0x52, flags: 0x0}, + 321: {region: 0x51, script: 0x52, flags: 0x0}, + 322: {region: 0x6d, script: 0x52, flags: 0x0}, + 323: {region: 0x9b, script: 0x5, flags: 0x0}, + 324: {region: 0x164, script: 0x52, flags: 0x0}, + 325: {region: 0x164, script: 0x52, flags: 0x0}, + 326: {region: 0x164, script: 0x52, flags: 0x0}, + 327: {region: 0x164, script: 0x52, flags: 0x0}, + 328: {region: 0x85, script: 0x52, flags: 0x0}, + 329: {region: 0xe, script: 0x2, flags: 0x1}, + 330: {region: 0x164, script: 0x52, flags: 0x0}, + 331: {region: 0xc2, script: 0x52, flags: 0x0}, + 332: {region: 0x71, script: 0x52, flags: 0x0}, + 333: {region: 0x10a, script: 0x5, flags: 0x0}, + 334: {region: 0xe6, script: 0x52, flags: 0x0}, + 335: {region: 0x10b, script: 0x52, flags: 0x0}, + 336: {region: 0x72, script: 0x52, flags: 0x0}, + 337: {region: 0x164, script: 0x52, flags: 0x0}, + 338: {region: 0x164, script: 0x52, flags: 0x0}, + 339: {region: 0x75, script: 0x52, flags: 0x0}, + 340: {region: 0x164, script: 0x52, flags: 0x0}, + 341: {region: 0x3a, script: 0x52, flags: 0x0}, + 342: {region: 0x164, script: 0x52, flags: 0x0}, + 343: {region: 0x164, script: 0x52, flags: 0x0}, + 344: {region: 0x164, script: 0x52, flags: 0x0}, + 345: {region: 0x77, script: 0x52, flags: 0x0}, + 346: {region: 0x134, script: 0x52, flags: 0x0}, + 347: {region: 0x77, script: 0x52, flags: 0x0}, + 348: {region: 0x5f, script: 0x52, flags: 0x0}, + 349: {region: 0x5f, script: 0x52, flags: 0x0}, + 350: {region: 0x51, script: 0x5, flags: 0x0}, + 351: {region: 0x13f, script: 0x52, flags: 0x0}, + 352: {region: 0x164, script: 0x52, flags: 0x0}, + 353: {region: 0x83, script: 0x52, flags: 0x0}, + 354: {region: 0x164, script: 0x52, flags: 0x0}, + 355: {region: 0xd3, script: 0x52, flags: 0x0}, + 356: {region: 0x9d, script: 0x52, flags: 0x0}, + 357: {region: 0xd5, script: 0x52, flags: 0x0}, + 358: {region: 0x164, script: 0x52, flags: 0x0}, + 359: {region: 0x10a, script: 0x52, flags: 0x0}, + 360: {region: 0xd8, script: 0x52, flags: 0x0}, + 361: {region: 0x95, script: 0x52, flags: 0x0}, + 362: {region: 0x7f, script: 
0x52, flags: 0x0}, + 363: {region: 0x164, script: 0x52, flags: 0x0}, + 364: {region: 0xbb, script: 0x52, flags: 0x0}, + 365: {region: 0x164, script: 0x52, flags: 0x0}, + 366: {region: 0x164, script: 0x52, flags: 0x0}, + 367: {region: 0x164, script: 0x52, flags: 0x0}, + 368: {region: 0x52, script: 0x34, flags: 0x0}, + 369: {region: 0x164, script: 0x52, flags: 0x0}, + 370: {region: 0x94, script: 0x52, flags: 0x0}, + 371: {region: 0x164, script: 0x52, flags: 0x0}, + 372: {region: 0x98, script: 0x20, flags: 0x0}, + 373: {region: 0x164, script: 0x52, flags: 0x0}, + 374: {region: 0x9b, script: 0x5, flags: 0x0}, + 375: {region: 0x7d, script: 0x52, flags: 0x0}, + 376: {region: 0x7a, script: 0x52, flags: 0x0}, + 377: {region: 0x164, script: 0x52, flags: 0x0}, + 378: {region: 0x164, script: 0x52, flags: 0x0}, + 379: {region: 0x164, script: 0x52, flags: 0x0}, + 380: {region: 0x164, script: 0x52, flags: 0x0}, + 381: {region: 0x164, script: 0x52, flags: 0x0}, + 382: {region: 0x164, script: 0x52, flags: 0x0}, + 383: {region: 0x6e, script: 0x27, flags: 0x0}, + 384: {region: 0x164, script: 0x52, flags: 0x0}, + 385: {region: 0xda, script: 0x20, flags: 0x0}, + 386: {region: 0x164, script: 0x52, flags: 0x0}, + 387: {region: 0xa6, script: 0x52, flags: 0x0}, + 388: {region: 0x164, script: 0x52, flags: 0x0}, + 389: {region: 0xe7, script: 0x5, flags: 0x0}, + 390: {region: 0x164, script: 0x52, flags: 0x0}, + 391: {region: 0xe7, script: 0x5, flags: 0x0}, + 392: {region: 0x164, script: 0x52, flags: 0x0}, + 393: {region: 0x164, script: 0x52, flags: 0x0}, + 394: {region: 0x6d, script: 0x52, flags: 0x0}, + 395: {region: 0x9b, script: 0x5, flags: 0x0}, + 396: {region: 0x164, script: 0x52, flags: 0x0}, + 397: {region: 0x164, script: 0x27, flags: 0x0}, + 398: {region: 0xf0, script: 0x52, flags: 0x0}, + 399: {region: 0x164, script: 0x52, flags: 0x0}, + 400: {region: 0x164, script: 0x52, flags: 0x0}, + 401: {region: 0x164, script: 0x52, flags: 0x0}, + 402: {region: 0x164, script: 0x27, flags: 0x0}, + 403: {region: 0x164, script: 0x52, flags: 0x0}, + 404: {region: 0x98, script: 0x20, flags: 0x0}, + 405: {region: 0x98, script: 0xd0, flags: 0x0}, + 406: {region: 0x94, script: 0x52, flags: 0x0}, + 407: {region: 0xd8, script: 0x52, flags: 0x0}, + 408: {region: 0x12f, script: 0x2b, flags: 0x0}, + 409: {region: 0x10, script: 0x2, flags: 0x1}, + 410: {region: 0x98, script: 0xe, flags: 0x0}, + 411: {region: 0x164, script: 0x52, flags: 0x0}, + 412: {region: 0x4d, script: 0x52, flags: 0x0}, + 413: {region: 0x98, script: 0x2e, flags: 0x0}, + 414: {region: 0x40, script: 0x52, flags: 0x0}, + 415: {region: 0x53, script: 0x52, flags: 0x0}, + 416: {region: 0x164, script: 0x52, flags: 0x0}, + 417: {region: 0x7f, script: 0x52, flags: 0x0}, + 418: {region: 0x164, script: 0x52, flags: 0x0}, + 419: {region: 0x164, script: 0x52, flags: 0x0}, + 420: {region: 0xa3, script: 0x52, flags: 0x0}, + 421: {region: 0x97, script: 0x52, flags: 0x0}, + 422: {region: 0x164, script: 0x52, flags: 0x0}, + 423: {region: 0xda, script: 0x20, flags: 0x0}, + 424: {region: 0x164, script: 0x52, flags: 0x0}, + 425: {region: 0x164, script: 0x5, flags: 0x0}, + 426: {region: 0x48, script: 0x52, flags: 0x0}, + 427: {region: 0x164, script: 0x5, flags: 0x0}, + 428: {region: 0x164, script: 0x52, flags: 0x0}, + 429: {region: 0x12, script: 0x3, flags: 0x1}, + 430: {region: 0x164, script: 0x52, flags: 0x0}, + 431: {region: 0x52, script: 0x34, flags: 0x0}, + 432: {region: 0x164, script: 0x52, flags: 0x0}, + 433: {region: 0x134, script: 0x52, flags: 0x0}, + 434: {region: 0x23, 
script: 0x5, flags: 0x0}, + 435: {region: 0x164, script: 0x52, flags: 0x0}, + 436: {region: 0x164, script: 0x27, flags: 0x0}, + 437: {region: 0x96, script: 0x37, flags: 0x0}, + 438: {region: 0x164, script: 0x52, flags: 0x0}, + 439: {region: 0x98, script: 0x20, flags: 0x0}, + 440: {region: 0x164, script: 0x52, flags: 0x0}, + 441: {region: 0x72, script: 0x52, flags: 0x0}, + 442: {region: 0x164, script: 0x52, flags: 0x0}, + 443: {region: 0x164, script: 0x52, flags: 0x0}, + 444: {region: 0xe6, script: 0x52, flags: 0x0}, + 445: {region: 0x164, script: 0x52, flags: 0x0}, + 446: {region: 0x12a, script: 0x39, flags: 0x0}, + 447: {region: 0x52, script: 0x81, flags: 0x0}, + 448: {region: 0x164, script: 0x52, flags: 0x0}, + 449: {region: 0xe7, script: 0x5, flags: 0x0}, + 450: {region: 0x98, script: 0x20, flags: 0x0}, + 451: {region: 0xae, script: 0x3a, flags: 0x0}, + 452: {region: 0xe6, script: 0x52, flags: 0x0}, + 453: {region: 0xe7, script: 0x5, flags: 0x0}, + 454: {region: 0xe5, script: 0x52, flags: 0x0}, + 455: {region: 0x98, script: 0x20, flags: 0x0}, + 456: {region: 0x98, script: 0x20, flags: 0x0}, + 457: {region: 0x164, script: 0x52, flags: 0x0}, + 458: {region: 0x8f, script: 0x52, flags: 0x0}, + 459: {region: 0x5f, script: 0x52, flags: 0x0}, + 460: {region: 0x52, script: 0x34, flags: 0x0}, + 461: {region: 0x90, script: 0x52, flags: 0x0}, + 462: {region: 0x91, script: 0x52, flags: 0x0}, + 463: {region: 0x164, script: 0x52, flags: 0x0}, + 464: {region: 0x27, script: 0x8, flags: 0x0}, + 465: {region: 0xd1, script: 0x52, flags: 0x0}, + 466: {region: 0x77, script: 0x52, flags: 0x0}, + 467: {region: 0x164, script: 0x52, flags: 0x0}, + 468: {region: 0x164, script: 0x52, flags: 0x0}, + 469: {region: 0xcf, script: 0x52, flags: 0x0}, + 470: {region: 0xd5, script: 0x52, flags: 0x0}, + 471: {region: 0x164, script: 0x52, flags: 0x0}, + 472: {region: 0x164, script: 0x52, flags: 0x0}, + 473: {region: 0x164, script: 0x52, flags: 0x0}, + 474: {region: 0x94, script: 0x52, flags: 0x0}, + 475: {region: 0x164, script: 0x52, flags: 0x0}, + 476: {region: 0x164, script: 0x52, flags: 0x0}, + 477: {region: 0x164, script: 0x52, flags: 0x0}, + 479: {region: 0xd5, script: 0x52, flags: 0x0}, + 480: {region: 0x164, script: 0x52, flags: 0x0}, + 481: {region: 0x164, script: 0x52, flags: 0x0}, + 482: {region: 0x52, script: 0xdf, flags: 0x0}, + 483: {region: 0x164, script: 0x52, flags: 0x0}, + 484: {region: 0x134, script: 0x52, flags: 0x0}, + 485: {region: 0x164, script: 0x52, flags: 0x0}, + 486: {region: 0x48, script: 0x52, flags: 0x0}, + 487: {region: 0x164, script: 0x52, flags: 0x0}, + 488: {region: 0x164, script: 0x52, flags: 0x0}, + 489: {region: 0xe6, script: 0x52, flags: 0x0}, + 490: {region: 0x164, script: 0x52, flags: 0x0}, + 491: {region: 0x94, script: 0x52, flags: 0x0}, + 492: {region: 0x105, script: 0x1e, flags: 0x0}, + 494: {region: 0x164, script: 0x52, flags: 0x0}, + 495: {region: 0x164, script: 0x52, flags: 0x0}, + 496: {region: 0x9c, script: 0x52, flags: 0x0}, + 497: {region: 0x9d, script: 0x52, flags: 0x0}, + 498: {region: 0x48, script: 0x17, flags: 0x0}, + 499: {region: 0x96, script: 0x37, flags: 0x0}, + 500: {region: 0x164, script: 0x52, flags: 0x0}, + 501: {region: 0x164, script: 0x52, flags: 0x0}, + 502: {region: 0x105, script: 0x52, flags: 0x0}, + 503: {region: 0x164, script: 0x52, flags: 0x0}, + 504: {region: 0xa1, script: 0x41, flags: 0x0}, + 505: {region: 0x164, script: 0x52, flags: 0x0}, + 506: {region: 0x9f, script: 0x52, flags: 0x0}, + 508: {region: 0x164, script: 0x52, flags: 0x0}, + 509: {region: 
0x164, script: 0x52, flags: 0x0}, + 510: {region: 0x164, script: 0x52, flags: 0x0}, + 511: {region: 0x51, script: 0x52, flags: 0x0}, + 512: {region: 0x12f, script: 0x37, flags: 0x0}, + 513: {region: 0x164, script: 0x52, flags: 0x0}, + 514: {region: 0x12e, script: 0x52, flags: 0x0}, + 515: {region: 0xda, script: 0x20, flags: 0x0}, + 516: {region: 0x164, script: 0x52, flags: 0x0}, + 517: {region: 0x62, script: 0x52, flags: 0x0}, + 518: {region: 0x94, script: 0x52, flags: 0x0}, + 519: {region: 0x94, script: 0x52, flags: 0x0}, + 520: {region: 0x7c, script: 0x29, flags: 0x0}, + 521: {region: 0x136, script: 0x1e, flags: 0x0}, + 522: {region: 0x66, script: 0x52, flags: 0x0}, + 523: {region: 0xc3, script: 0x52, flags: 0x0}, + 524: {region: 0x164, script: 0x52, flags: 0x0}, + 525: {region: 0x164, script: 0x52, flags: 0x0}, + 526: {region: 0xd5, script: 0x52, flags: 0x0}, + 527: {region: 0xa3, script: 0x52, flags: 0x0}, + 528: {region: 0xc2, script: 0x52, flags: 0x0}, + 529: {region: 0x105, script: 0x1e, flags: 0x0}, + 530: {region: 0x164, script: 0x52, flags: 0x0}, + 531: {region: 0x164, script: 0x52, flags: 0x0}, + 532: {region: 0x164, script: 0x52, flags: 0x0}, + 533: {region: 0x164, script: 0x52, flags: 0x0}, + 534: {region: 0xd3, script: 0x5, flags: 0x0}, + 535: {region: 0xd5, script: 0x52, flags: 0x0}, + 536: {region: 0x163, script: 0x52, flags: 0x0}, + 537: {region: 0x164, script: 0x52, flags: 0x0}, + 538: {region: 0x164, script: 0x52, flags: 0x0}, + 539: {region: 0x12e, script: 0x52, flags: 0x0}, + 540: {region: 0x121, script: 0x5, flags: 0x0}, + 541: {region: 0x164, script: 0x52, flags: 0x0}, + 542: {region: 0x122, script: 0xd5, flags: 0x0}, + 543: {region: 0x59, script: 0x52, flags: 0x0}, + 544: {region: 0x51, script: 0x52, flags: 0x0}, + 545: {region: 0x164, script: 0x52, flags: 0x0}, + 546: {region: 0x4e, script: 0x52, flags: 0x0}, + 547: {region: 0x98, script: 0x20, flags: 0x0}, + 548: {region: 0x98, script: 0x20, flags: 0x0}, + 549: {region: 0x4a, script: 0x52, flags: 0x0}, + 550: {region: 0x94, script: 0x52, flags: 0x0}, + 551: {region: 0x164, script: 0x52, flags: 0x0}, + 552: {region: 0x40, script: 0x52, flags: 0x0}, + 553: {region: 0x98, script: 0x52, flags: 0x0}, + 554: {region: 0x52, script: 0xcc, flags: 0x0}, + 555: {region: 0x98, script: 0x20, flags: 0x0}, + 556: {region: 0xc2, script: 0x52, flags: 0x0}, + 557: {region: 0x164, script: 0x52, flags: 0x0}, + 558: {region: 0x98, script: 0x6b, flags: 0x0}, + 559: {region: 0xe7, script: 0x5, flags: 0x0}, + 560: {region: 0x164, script: 0x52, flags: 0x0}, + 561: {region: 0xa3, script: 0x52, flags: 0x0}, + 562: {region: 0x164, script: 0x52, flags: 0x0}, + 563: {region: 0x12a, script: 0x52, flags: 0x0}, + 564: {region: 0x164, script: 0x52, flags: 0x0}, + 565: {region: 0xd1, script: 0x52, flags: 0x0}, + 566: {region: 0x164, script: 0x52, flags: 0x0}, + 567: {region: 0xae, script: 0x4f, flags: 0x0}, + 568: {region: 0x164, script: 0x52, flags: 0x0}, + 569: {region: 0x164, script: 0x52, flags: 0x0}, + 570: {region: 0x15, script: 0x6, flags: 0x1}, + 571: {region: 0x164, script: 0x52, flags: 0x0}, + 572: {region: 0x51, script: 0x52, flags: 0x0}, + 573: {region: 0x81, script: 0x52, flags: 0x0}, + 574: {region: 0xa3, script: 0x52, flags: 0x0}, + 575: {region: 0x164, script: 0x52, flags: 0x0}, + 576: {region: 0x164, script: 0x52, flags: 0x0}, + 577: {region: 0x164, script: 0x52, flags: 0x0}, + 578: {region: 0xa5, script: 0x46, flags: 0x0}, + 579: {region: 0x29, script: 0x52, flags: 0x0}, + 580: {region: 0x164, script: 0x52, flags: 0x0}, + 581: 
{region: 0x164, script: 0x52, flags: 0x0}, + 582: {region: 0x164, script: 0x52, flags: 0x0}, + 583: {region: 0x164, script: 0x52, flags: 0x0}, + 584: {region: 0x164, script: 0x52, flags: 0x0}, + 585: {region: 0x98, script: 0x4a, flags: 0x0}, + 586: {region: 0x164, script: 0x52, flags: 0x0}, + 587: {region: 0xaa, script: 0x4b, flags: 0x0}, + 588: {region: 0x105, script: 0x1e, flags: 0x0}, + 589: {region: 0x98, script: 0x20, flags: 0x0}, + 590: {region: 0x164, script: 0x52, flags: 0x0}, + 591: {region: 0x74, script: 0x52, flags: 0x0}, + 592: {region: 0x164, script: 0x52, flags: 0x0}, + 593: {region: 0xb3, script: 0x52, flags: 0x0}, + 594: {region: 0x164, script: 0x52, flags: 0x0}, + 595: {region: 0x164, script: 0x52, flags: 0x0}, + 596: {region: 0x164, script: 0x52, flags: 0x0}, + 597: {region: 0x164, script: 0x52, flags: 0x0}, + 598: {region: 0x164, script: 0x52, flags: 0x0}, + 599: {region: 0x164, script: 0x52, flags: 0x0}, + 600: {region: 0x164, script: 0x52, flags: 0x0}, + 601: {region: 0x164, script: 0x27, flags: 0x0}, + 603: {region: 0x105, script: 0x1e, flags: 0x0}, + 604: {region: 0x111, script: 0x52, flags: 0x0}, + 605: {region: 0xe6, script: 0x52, flags: 0x0}, + 606: {region: 0x105, script: 0x52, flags: 0x0}, + 607: {region: 0x164, script: 0x52, flags: 0x0}, + 608: {region: 0x98, script: 0x20, flags: 0x0}, + 609: {region: 0x98, script: 0x5, flags: 0x0}, + 610: {region: 0x12e, script: 0x52, flags: 0x0}, + 611: {region: 0x164, script: 0x52, flags: 0x0}, + 612: {region: 0x51, script: 0x52, flags: 0x0}, + 613: {region: 0x5f, script: 0x52, flags: 0x0}, + 614: {region: 0x164, script: 0x52, flags: 0x0}, + 615: {region: 0x164, script: 0x52, flags: 0x0}, + 616: {region: 0x164, script: 0x27, flags: 0x0}, + 617: {region: 0x164, script: 0x52, flags: 0x0}, + 618: {region: 0x164, script: 0x52, flags: 0x0}, + 619: {region: 0x1b, script: 0x3, flags: 0x1}, + 620: {region: 0x164, script: 0x52, flags: 0x0}, + 621: {region: 0x164, script: 0x52, flags: 0x0}, + 622: {region: 0x164, script: 0x52, flags: 0x0}, + 623: {region: 0x164, script: 0x52, flags: 0x0}, + 624: {region: 0x105, script: 0x1e, flags: 0x0}, + 625: {region: 0x164, script: 0x52, flags: 0x0}, + 626: {region: 0x164, script: 0x52, flags: 0x0}, + 627: {region: 0x164, script: 0x52, flags: 0x0}, + 628: {region: 0x105, script: 0x1e, flags: 0x0}, + 629: {region: 0x164, script: 0x52, flags: 0x0}, + 630: {region: 0x94, script: 0x52, flags: 0x0}, + 631: {region: 0xe7, script: 0x5, flags: 0x0}, + 632: {region: 0x7a, script: 0x52, flags: 0x0}, + 633: {region: 0x164, script: 0x52, flags: 0x0}, + 634: {region: 0x164, script: 0x52, flags: 0x0}, + 635: {region: 0x164, script: 0x52, flags: 0x0}, + 636: {region: 0x164, script: 0x27, flags: 0x0}, + 637: {region: 0x122, script: 0xd5, flags: 0x0}, + 638: {region: 0xe7, script: 0x5, flags: 0x0}, + 639: {region: 0x164, script: 0x52, flags: 0x0}, + 640: {region: 0x164, script: 0x52, flags: 0x0}, + 641: {region: 0x1e, script: 0x5, flags: 0x1}, + 642: {region: 0x164, script: 0x52, flags: 0x0}, + 643: {region: 0x164, script: 0x52, flags: 0x0}, + 644: {region: 0x164, script: 0x52, flags: 0x0}, + 645: {region: 0x137, script: 0x52, flags: 0x0}, + 646: {region: 0x86, script: 0x56, flags: 0x0}, + 647: {region: 0x96, script: 0x37, flags: 0x0}, + 648: {region: 0x12e, script: 0x52, flags: 0x0}, + 649: {region: 0xe7, script: 0x5, flags: 0x0}, + 650: {region: 0x130, script: 0x52, flags: 0x0}, + 651: {region: 0x164, script: 0x52, flags: 0x0}, + 652: {region: 0xb6, script: 0x52, flags: 0x0}, + 653: {region: 0x105, script: 0x1e, 
flags: 0x0}, + 654: {region: 0x164, script: 0x52, flags: 0x0}, + 655: {region: 0x94, script: 0x52, flags: 0x0}, + 656: {region: 0x164, script: 0x52, flags: 0x0}, + 657: {region: 0x52, script: 0xd5, flags: 0x0}, + 658: {region: 0x164, script: 0x52, flags: 0x0}, + 659: {region: 0x164, script: 0x52, flags: 0x0}, + 660: {region: 0x164, script: 0x52, flags: 0x0}, + 661: {region: 0x164, script: 0x52, flags: 0x0}, + 662: {region: 0x98, script: 0x54, flags: 0x0}, + 663: {region: 0x164, script: 0x52, flags: 0x0}, + 664: {region: 0x164, script: 0x52, flags: 0x0}, + 665: {region: 0x105, script: 0x1e, flags: 0x0}, + 666: {region: 0x130, script: 0x52, flags: 0x0}, + 667: {region: 0x164, script: 0x52, flags: 0x0}, + 668: {region: 0xd8, script: 0x52, flags: 0x0}, + 669: {region: 0x164, script: 0x52, flags: 0x0}, + 670: {region: 0x164, script: 0x52, flags: 0x0}, + 671: {region: 0x23, script: 0x2, flags: 0x1}, + 672: {region: 0x164, script: 0x52, flags: 0x0}, + 673: {region: 0x164, script: 0x52, flags: 0x0}, + 674: {region: 0x9d, script: 0x52, flags: 0x0}, + 675: {region: 0x52, script: 0x58, flags: 0x0}, + 676: {region: 0x94, script: 0x52, flags: 0x0}, + 677: {region: 0x9b, script: 0x5, flags: 0x0}, + 678: {region: 0x134, script: 0x52, flags: 0x0}, + 679: {region: 0x164, script: 0x52, flags: 0x0}, + 680: {region: 0x164, script: 0x52, flags: 0x0}, + 681: {region: 0x98, script: 0xd0, flags: 0x0}, + 682: {region: 0x9d, script: 0x52, flags: 0x0}, + 683: {region: 0x164, script: 0x52, flags: 0x0}, + 684: {region: 0x4a, script: 0x52, flags: 0x0}, + 685: {region: 0x164, script: 0x52, flags: 0x0}, + 686: {region: 0x164, script: 0x52, flags: 0x0}, + 687: {region: 0xae, script: 0x4f, flags: 0x0}, + 688: {region: 0x164, script: 0x52, flags: 0x0}, + 689: {region: 0x164, script: 0x52, flags: 0x0}, + 690: {region: 0x4a, script: 0x52, flags: 0x0}, + 691: {region: 0x164, script: 0x52, flags: 0x0}, + 692: {region: 0x164, script: 0x52, flags: 0x0}, + 693: {region: 0x161, script: 0x52, flags: 0x0}, + 694: {region: 0x9b, script: 0x5, flags: 0x0}, + 695: {region: 0xb5, script: 0x52, flags: 0x0}, + 696: {region: 0xb7, script: 0x52, flags: 0x0}, + 697: {region: 0x4a, script: 0x52, flags: 0x0}, + 698: {region: 0x4a, script: 0x52, flags: 0x0}, + 699: {region: 0xa3, script: 0x52, flags: 0x0}, + 700: {region: 0xa3, script: 0x52, flags: 0x0}, + 701: {region: 0x9b, script: 0x5, flags: 0x0}, + 702: {region: 0xb7, script: 0x52, flags: 0x0}, + 703: {region: 0x122, script: 0xd5, flags: 0x0}, + 704: {region: 0x52, script: 0x34, flags: 0x0}, + 705: {region: 0x12a, script: 0x52, flags: 0x0}, + 706: {region: 0x94, script: 0x52, flags: 0x0}, + 707: {region: 0x51, script: 0x52, flags: 0x0}, + 708: {region: 0x98, script: 0x20, flags: 0x0}, + 709: {region: 0x98, script: 0x20, flags: 0x0}, + 710: {region: 0x94, script: 0x52, flags: 0x0}, + 711: {region: 0x25, script: 0x3, flags: 0x1}, + 712: {region: 0xa3, script: 0x52, flags: 0x0}, + 713: {region: 0x164, script: 0x52, flags: 0x0}, + 714: {region: 0xce, script: 0x52, flags: 0x0}, + 715: {region: 0x164, script: 0x52, flags: 0x0}, + 716: {region: 0x164, script: 0x52, flags: 0x0}, + 717: {region: 0x164, script: 0x52, flags: 0x0}, + 718: {region: 0x164, script: 0x52, flags: 0x0}, + 719: {region: 0x164, script: 0x52, flags: 0x0}, + 720: {region: 0x164, script: 0x52, flags: 0x0}, + 721: {region: 0x164, script: 0x52, flags: 0x0}, + 722: {region: 0x164, script: 0x52, flags: 0x0}, + 723: {region: 0x164, script: 0x52, flags: 0x0}, + 724: {region: 0x164, script: 0x52, flags: 0x0}, + 725: {region: 0x164, 
script: 0x52, flags: 0x0}, + 726: {region: 0x164, script: 0x5, flags: 0x0}, + 727: {region: 0x105, script: 0x1e, flags: 0x0}, + 728: {region: 0xe6, script: 0x52, flags: 0x0}, + 729: {region: 0x164, script: 0x52, flags: 0x0}, + 730: {region: 0x94, script: 0x52, flags: 0x0}, + 731: {region: 0x164, script: 0x27, flags: 0x0}, + 732: {region: 0x164, script: 0x52, flags: 0x0}, + 733: {region: 0x164, script: 0x52, flags: 0x0}, + 734: {region: 0x164, script: 0x52, flags: 0x0}, + 735: {region: 0x111, script: 0x52, flags: 0x0}, + 736: {region: 0xa3, script: 0x52, flags: 0x0}, + 737: {region: 0x164, script: 0x52, flags: 0x0}, + 738: {region: 0x164, script: 0x52, flags: 0x0}, + 739: {region: 0x122, script: 0x5, flags: 0x0}, + 740: {region: 0xcb, script: 0x52, flags: 0x0}, + 741: {region: 0x164, script: 0x52, flags: 0x0}, + 742: {region: 0x164, script: 0x52, flags: 0x0}, + 743: {region: 0x164, script: 0x52, flags: 0x0}, + 744: {region: 0xbe, script: 0x52, flags: 0x0}, + 745: {region: 0xd0, script: 0x52, flags: 0x0}, + 746: {region: 0x164, script: 0x52, flags: 0x0}, + 747: {region: 0x51, script: 0x52, flags: 0x0}, + 748: {region: 0xda, script: 0x20, flags: 0x0}, + 749: {region: 0x12e, script: 0x52, flags: 0x0}, + 750: {region: 0xbf, script: 0x52, flags: 0x0}, + 751: {region: 0x164, script: 0x52, flags: 0x0}, + 752: {region: 0x164, script: 0x52, flags: 0x0}, + 753: {region: 0xdf, script: 0x52, flags: 0x0}, + 754: {region: 0x164, script: 0x52, flags: 0x0}, + 755: {region: 0x94, script: 0x52, flags: 0x0}, + 756: {region: 0x9a, script: 0x36, flags: 0x0}, + 757: {region: 0x164, script: 0x52, flags: 0x0}, + 758: {region: 0xc1, script: 0x1e, flags: 0x0}, + 759: {region: 0x164, script: 0x5, flags: 0x0}, + 760: {region: 0x164, script: 0x52, flags: 0x0}, + 761: {region: 0x164, script: 0x52, flags: 0x0}, + 762: {region: 0x164, script: 0x52, flags: 0x0}, + 763: {region: 0x98, script: 0x64, flags: 0x0}, + 764: {region: 0x164, script: 0x52, flags: 0x0}, + 765: {region: 0x164, script: 0x52, flags: 0x0}, + 766: {region: 0x10a, script: 0x52, flags: 0x0}, + 767: {region: 0x164, script: 0x52, flags: 0x0}, + 768: {region: 0x164, script: 0x52, flags: 0x0}, + 769: {region: 0x164, script: 0x52, flags: 0x0}, + 770: {region: 0x28, script: 0x3, flags: 0x1}, + 771: {region: 0x164, script: 0x52, flags: 0x0}, + 772: {region: 0x164, script: 0x52, flags: 0x0}, + 773: {region: 0x98, script: 0xe, flags: 0x0}, + 774: {region: 0xc3, script: 0x6b, flags: 0x0}, + 776: {region: 0x164, script: 0x52, flags: 0x0}, + 777: {region: 0x48, script: 0x52, flags: 0x0}, + 778: {region: 0x48, script: 0x52, flags: 0x0}, + 779: {region: 0x36, script: 0x52, flags: 0x0}, + 780: {region: 0x164, script: 0x52, flags: 0x0}, + 781: {region: 0x164, script: 0x52, flags: 0x0}, + 782: {region: 0x164, script: 0x52, flags: 0x0}, + 783: {region: 0x164, script: 0x52, flags: 0x0}, + 784: {region: 0x164, script: 0x52, flags: 0x0}, + 785: {region: 0x164, script: 0x52, flags: 0x0}, + 786: {region: 0x98, script: 0x20, flags: 0x0}, + 787: {region: 0xda, script: 0x20, flags: 0x0}, + 788: {region: 0x105, script: 0x1e, flags: 0x0}, + 789: {region: 0x34, script: 0x68, flags: 0x0}, + 790: {region: 0x2b, script: 0x3, flags: 0x1}, + 791: {region: 0xca, script: 0x52, flags: 0x0}, + 792: {region: 0x164, script: 0x52, flags: 0x0}, + 793: {region: 0x164, script: 0x52, flags: 0x0}, + 794: {region: 0x164, script: 0x52, flags: 0x0}, + 795: {region: 0x98, script: 0x20, flags: 0x0}, + 796: {region: 0x51, script: 0x52, flags: 0x0}, + 798: {region: 0x164, script: 0x52, flags: 0x0}, + 799: 
{region: 0x134, script: 0x52, flags: 0x0}, + 800: {region: 0x164, script: 0x52, flags: 0x0}, + 801: {region: 0x164, script: 0x52, flags: 0x0}, + 802: {region: 0xe7, script: 0x5, flags: 0x0}, + 803: {region: 0xc2, script: 0x52, flags: 0x0}, + 804: {region: 0x98, script: 0x20, flags: 0x0}, + 805: {region: 0x94, script: 0x52, flags: 0x0}, + 806: {region: 0x163, script: 0x52, flags: 0x0}, + 807: {region: 0x164, script: 0x52, flags: 0x0}, + 808: {region: 0xc3, script: 0x6b, flags: 0x0}, + 809: {region: 0x164, script: 0x52, flags: 0x0}, + 810: {region: 0x164, script: 0x27, flags: 0x0}, + 811: {region: 0x105, script: 0x1e, flags: 0x0}, + 812: {region: 0x164, script: 0x52, flags: 0x0}, + 813: {region: 0x130, script: 0x52, flags: 0x0}, + 814: {region: 0x9b, script: 0x5d, flags: 0x0}, + 815: {region: 0x164, script: 0x52, flags: 0x0}, + 816: {region: 0x164, script: 0x52, flags: 0x0}, + 817: {region: 0x9b, script: 0x5, flags: 0x0}, + 818: {region: 0x164, script: 0x52, flags: 0x0}, + 819: {region: 0x164, script: 0x52, flags: 0x0}, + 820: {region: 0x164, script: 0x52, flags: 0x0}, + 821: {region: 0xdc, script: 0x52, flags: 0x0}, + 822: {region: 0x164, script: 0x52, flags: 0x0}, + 823: {region: 0x164, script: 0x52, flags: 0x0}, + 825: {region: 0x164, script: 0x52, flags: 0x0}, + 826: {region: 0x52, script: 0x34, flags: 0x0}, + 827: {region: 0x9d, script: 0x52, flags: 0x0}, + 828: {region: 0xd1, script: 0x52, flags: 0x0}, + 829: {region: 0x164, script: 0x52, flags: 0x0}, + 830: {region: 0xd9, script: 0x52, flags: 0x0}, + 831: {region: 0x164, script: 0x52, flags: 0x0}, + 832: {region: 0x164, script: 0x52, flags: 0x0}, + 833: {region: 0x164, script: 0x52, flags: 0x0}, + 834: {region: 0xce, script: 0x52, flags: 0x0}, + 835: {region: 0x164, script: 0x52, flags: 0x0}, + 836: {region: 0x164, script: 0x52, flags: 0x0}, + 837: {region: 0x163, script: 0x52, flags: 0x0}, + 838: {region: 0xd0, script: 0x52, flags: 0x0}, + 839: {region: 0x5f, script: 0x52, flags: 0x0}, + 840: {region: 0xda, script: 0x20, flags: 0x0}, + 841: {region: 0x164, script: 0x52, flags: 0x0}, + 842: {region: 0xda, script: 0x20, flags: 0x0}, + 843: {region: 0x164, script: 0x52, flags: 0x0}, + 844: {region: 0x164, script: 0x52, flags: 0x0}, + 845: {region: 0xd1, script: 0x52, flags: 0x0}, + 846: {region: 0x164, script: 0x52, flags: 0x0}, + 847: {region: 0x164, script: 0x52, flags: 0x0}, + 848: {region: 0xd0, script: 0x52, flags: 0x0}, + 849: {region: 0x164, script: 0x52, flags: 0x0}, + 850: {region: 0xce, script: 0x52, flags: 0x0}, + 851: {region: 0xce, script: 0x52, flags: 0x0}, + 852: {region: 0x164, script: 0x52, flags: 0x0}, + 853: {region: 0x164, script: 0x52, flags: 0x0}, + 854: {region: 0x94, script: 0x52, flags: 0x0}, + 855: {region: 0x164, script: 0x52, flags: 0x0}, + 856: {region: 0xde, script: 0x52, flags: 0x0}, + 857: {region: 0x164, script: 0x52, flags: 0x0}, + 858: {region: 0x164, script: 0x52, flags: 0x0}, + 859: {region: 0x98, script: 0x52, flags: 0x0}, + 860: {region: 0x164, script: 0x52, flags: 0x0}, + 861: {region: 0x164, script: 0x52, flags: 0x0}, + 862: {region: 0xd8, script: 0x52, flags: 0x0}, + 863: {region: 0x51, script: 0x52, flags: 0x0}, + 864: {region: 0x164, script: 0x52, flags: 0x0}, + 865: {region: 0xd9, script: 0x52, flags: 0x0}, + 866: {region: 0x164, script: 0x52, flags: 0x0}, + 867: {region: 0x51, script: 0x52, flags: 0x0}, + 868: {region: 0x164, script: 0x52, flags: 0x0}, + 869: {region: 0x164, script: 0x52, flags: 0x0}, + 870: {region: 0xd9, script: 0x52, flags: 0x0}, + 871: {region: 0x122, script: 0x4e, 
flags: 0x0}, + 872: {region: 0x98, script: 0x20, flags: 0x0}, + 873: {region: 0x10b, script: 0xb7, flags: 0x0}, + 874: {region: 0x164, script: 0x52, flags: 0x0}, + 875: {region: 0x164, script: 0x52, flags: 0x0}, + 876: {region: 0x83, script: 0x70, flags: 0x0}, + 877: {region: 0x160, script: 0x52, flags: 0x0}, + 878: {region: 0x164, script: 0x52, flags: 0x0}, + 879: {region: 0x48, script: 0x17, flags: 0x0}, + 880: {region: 0x164, script: 0x52, flags: 0x0}, + 881: {region: 0x160, script: 0x52, flags: 0x0}, + 882: {region: 0x164, script: 0x52, flags: 0x0}, + 883: {region: 0x164, script: 0x52, flags: 0x0}, + 884: {region: 0x164, script: 0x52, flags: 0x0}, + 885: {region: 0x164, script: 0x52, flags: 0x0}, + 886: {region: 0x164, script: 0x52, flags: 0x0}, + 887: {region: 0x116, script: 0x52, flags: 0x0}, + 888: {region: 0x164, script: 0x52, flags: 0x0}, + 889: {region: 0x164, script: 0x52, flags: 0x0}, + 890: {region: 0x134, script: 0x52, flags: 0x0}, + 891: {region: 0x164, script: 0x52, flags: 0x0}, + 892: {region: 0x52, script: 0x52, flags: 0x0}, + 893: {region: 0x164, script: 0x52, flags: 0x0}, + 894: {region: 0xcd, script: 0x52, flags: 0x0}, + 895: {region: 0x12e, script: 0x52, flags: 0x0}, + 896: {region: 0x130, script: 0x52, flags: 0x0}, + 897: {region: 0x7f, script: 0x52, flags: 0x0}, + 898: {region: 0x77, script: 0x52, flags: 0x0}, + 899: {region: 0x164, script: 0x52, flags: 0x0}, + 901: {region: 0x164, script: 0x52, flags: 0x0}, + 902: {region: 0x164, script: 0x52, flags: 0x0}, + 903: {region: 0x6e, script: 0x52, flags: 0x0}, + 904: {region: 0x164, script: 0x52, flags: 0x0}, + 905: {region: 0x164, script: 0x52, flags: 0x0}, + 906: {region: 0x164, script: 0x52, flags: 0x0}, + 907: {region: 0x164, script: 0x52, flags: 0x0}, + 908: {region: 0x98, script: 0x75, flags: 0x0}, + 909: {region: 0x164, script: 0x52, flags: 0x0}, + 910: {region: 0x164, script: 0x5, flags: 0x0}, + 911: {region: 0x7c, script: 0x1e, flags: 0x0}, + 912: {region: 0x134, script: 0x76, flags: 0x0}, + 913: {region: 0x164, script: 0x5, flags: 0x0}, + 914: {region: 0xc4, script: 0x74, flags: 0x0}, + 915: {region: 0x164, script: 0x52, flags: 0x0}, + 916: {region: 0x2e, script: 0x3, flags: 0x1}, + 917: {region: 0xe6, script: 0x52, flags: 0x0}, + 918: {region: 0x31, script: 0x2, flags: 0x1}, + 919: {region: 0xe6, script: 0x52, flags: 0x0}, + 920: {region: 0x2f, script: 0x52, flags: 0x0}, + 921: {region: 0xef, script: 0x52, flags: 0x0}, + 922: {region: 0x164, script: 0x52, flags: 0x0}, + 923: {region: 0x77, script: 0x52, flags: 0x0}, + 924: {region: 0xd5, script: 0x52, flags: 0x0}, + 925: {region: 0x134, script: 0x52, flags: 0x0}, + 926: {region: 0x48, script: 0x52, flags: 0x0}, + 927: {region: 0x164, script: 0x52, flags: 0x0}, + 928: {region: 0x9b, script: 0xdd, flags: 0x0}, + 929: {region: 0x164, script: 0x52, flags: 0x0}, + 930: {region: 0x5f, script: 0x52, flags: 0x0}, + 931: {region: 0x164, script: 0x5, flags: 0x0}, + 932: {region: 0xaf, script: 0x7f, flags: 0x0}, + 934: {region: 0x164, script: 0x52, flags: 0x0}, + 935: {region: 0x164, script: 0x52, flags: 0x0}, + 936: {region: 0x98, script: 0x12, flags: 0x0}, + 937: {region: 0xa3, script: 0x52, flags: 0x0}, + 938: {region: 0xe8, script: 0x52, flags: 0x0}, + 939: {region: 0x164, script: 0x52, flags: 0x0}, + 940: {region: 0x9d, script: 0x52, flags: 0x0}, + 941: {region: 0x164, script: 0x52, flags: 0x0}, + 942: {region: 0x164, script: 0x52, flags: 0x0}, + 943: {region: 0x86, script: 0x2d, flags: 0x0}, + 944: {region: 0x74, script: 0x52, flags: 0x0}, + 945: {region: 0x164, 
script: 0x52, flags: 0x0}, + 946: {region: 0xe7, script: 0x45, flags: 0x0}, + 947: {region: 0x9b, script: 0x5, flags: 0x0}, + 948: {region: 0x1, script: 0x52, flags: 0x0}, + 949: {region: 0x23, script: 0x5, flags: 0x0}, + 950: {region: 0x164, script: 0x52, flags: 0x0}, + 951: {region: 0x40, script: 0x52, flags: 0x0}, + 952: {region: 0x164, script: 0x52, flags: 0x0}, + 953: {region: 0x79, script: 0x52, flags: 0x0}, + 954: {region: 0x164, script: 0x52, flags: 0x0}, + 955: {region: 0xe3, script: 0x52, flags: 0x0}, + 956: {region: 0x88, script: 0x52, flags: 0x0}, + 957: {region: 0x68, script: 0x52, flags: 0x0}, + 958: {region: 0x164, script: 0x52, flags: 0x0}, + 959: {region: 0x98, script: 0x20, flags: 0x0}, + 960: {region: 0x164, script: 0x52, flags: 0x0}, + 961: {region: 0x101, script: 0x52, flags: 0x0}, + 962: {region: 0x94, script: 0x52, flags: 0x0}, + 963: {region: 0x164, script: 0x52, flags: 0x0}, + 964: {region: 0x164, script: 0x52, flags: 0x0}, + 965: {region: 0x9d, script: 0x52, flags: 0x0}, + 966: {region: 0x164, script: 0x5, flags: 0x0}, + 967: {region: 0x98, script: 0x52, flags: 0x0}, + 968: {region: 0x33, script: 0x2, flags: 0x1}, + 969: {region: 0xda, script: 0x20, flags: 0x0}, + 970: {region: 0x34, script: 0xe, flags: 0x0}, + 971: {region: 0x4d, script: 0x52, flags: 0x0}, + 972: {region: 0x71, script: 0x52, flags: 0x0}, + 973: {region: 0x4d, script: 0x52, flags: 0x0}, + 974: {region: 0x9b, script: 0x5, flags: 0x0}, + 975: {region: 0x10b, script: 0x52, flags: 0x0}, + 976: {region: 0x39, script: 0x52, flags: 0x0}, + 977: {region: 0x164, script: 0x52, flags: 0x0}, + 978: {region: 0xd0, script: 0x52, flags: 0x0}, + 979: {region: 0x103, script: 0x52, flags: 0x0}, + 980: {region: 0x94, script: 0x52, flags: 0x0}, + 981: {region: 0x12e, script: 0x52, flags: 0x0}, + 982: {region: 0x164, script: 0x52, flags: 0x0}, + 983: {region: 0x164, script: 0x52, flags: 0x0}, + 984: {region: 0x72, script: 0x52, flags: 0x0}, + 985: {region: 0x105, script: 0x1e, flags: 0x0}, + 986: {region: 0x12f, script: 0x1e, flags: 0x0}, + 987: {region: 0x108, script: 0x52, flags: 0x0}, + 988: {region: 0x106, script: 0x52, flags: 0x0}, + 989: {region: 0x12e, script: 0x52, flags: 0x0}, + 990: {region: 0x164, script: 0x52, flags: 0x0}, + 991: {region: 0xa1, script: 0x44, flags: 0x0}, + 992: {region: 0x98, script: 0x20, flags: 0x0}, + 993: {region: 0x7f, script: 0x52, flags: 0x0}, + 994: {region: 0x105, script: 0x1e, flags: 0x0}, + 995: {region: 0xa3, script: 0x52, flags: 0x0}, + 996: {region: 0x94, script: 0x52, flags: 0x0}, + 997: {region: 0x98, script: 0x52, flags: 0x0}, + 998: {region: 0x98, script: 0xbb, flags: 0x0}, + 999: {region: 0x164, script: 0x52, flags: 0x0}, + 1000: {region: 0x164, script: 0x52, flags: 0x0}, + 1001: {region: 0x12e, script: 0x52, flags: 0x0}, + 1002: {region: 0x9d, script: 0x52, flags: 0x0}, + 1003: {region: 0x98, script: 0x20, flags: 0x0}, + 1004: {region: 0x164, script: 0x5, flags: 0x0}, + 1005: {region: 0x9d, script: 0x52, flags: 0x0}, + 1006: {region: 0x7a, script: 0x52, flags: 0x0}, + 1007: {region: 0x48, script: 0x52, flags: 0x0}, + 1008: {region: 0x35, script: 0x4, flags: 0x1}, + 1009: {region: 0x9d, script: 0x52, flags: 0x0}, + 1010: {region: 0x9b, script: 0x5, flags: 0x0}, + 1011: {region: 0xd9, script: 0x52, flags: 0x0}, + 1012: {region: 0x4e, script: 0x52, flags: 0x0}, + 1013: {region: 0xd0, script: 0x52, flags: 0x0}, + 1014: {region: 0xce, script: 0x52, flags: 0x0}, + 1015: {region: 0xc2, script: 0x52, flags: 0x0}, + 1016: {region: 0x4b, script: 0x52, flags: 0x0}, + 1017: 
{region: 0x95, script: 0x72, flags: 0x0}, + 1018: {region: 0xb5, script: 0x52, flags: 0x0}, + 1019: {region: 0x164, script: 0x27, flags: 0x0}, + 1020: {region: 0x164, script: 0x52, flags: 0x0}, + 1022: {region: 0xb9, script: 0xd2, flags: 0x0}, + 1023: {region: 0x164, script: 0x52, flags: 0x0}, + 1024: {region: 0xc3, script: 0x6b, flags: 0x0}, + 1025: {region: 0x164, script: 0x5, flags: 0x0}, + 1026: {region: 0xb2, script: 0xc1, flags: 0x0}, + 1027: {region: 0x6e, script: 0x52, flags: 0x0}, + 1028: {region: 0x164, script: 0x52, flags: 0x0}, + 1029: {region: 0x164, script: 0x52, flags: 0x0}, + 1030: {region: 0x164, script: 0x52, flags: 0x0}, + 1031: {region: 0x164, script: 0x52, flags: 0x0}, + 1032: {region: 0x110, script: 0x52, flags: 0x0}, + 1033: {region: 0x164, script: 0x52, flags: 0x0}, + 1034: {region: 0xe7, script: 0x5, flags: 0x0}, + 1035: {region: 0x164, script: 0x52, flags: 0x0}, + 1036: {region: 0x10e, script: 0x52, flags: 0x0}, + 1037: {region: 0x164, script: 0x52, flags: 0x0}, + 1038: {region: 0xe8, script: 0x52, flags: 0x0}, + 1039: {region: 0x164, script: 0x52, flags: 0x0}, + 1040: {region: 0x94, script: 0x52, flags: 0x0}, + 1041: {region: 0x141, script: 0x52, flags: 0x0}, + 1042: {region: 0x10b, script: 0x52, flags: 0x0}, + 1044: {region: 0x10b, script: 0x52, flags: 0x0}, + 1045: {region: 0x71, script: 0x52, flags: 0x0}, + 1046: {region: 0x96, script: 0xb8, flags: 0x0}, + 1047: {region: 0x164, script: 0x52, flags: 0x0}, + 1048: {region: 0x71, script: 0x52, flags: 0x0}, + 1049: {region: 0x163, script: 0x52, flags: 0x0}, + 1050: {region: 0x164, script: 0x52, flags: 0x0}, + 1051: {region: 0xc2, script: 0x52, flags: 0x0}, + 1052: {region: 0x164, script: 0x52, flags: 0x0}, + 1053: {region: 0x164, script: 0x52, flags: 0x0}, + 1054: {region: 0x164, script: 0x52, flags: 0x0}, + 1055: {region: 0x114, script: 0x52, flags: 0x0}, + 1056: {region: 0x164, script: 0x52, flags: 0x0}, + 1057: {region: 0x164, script: 0x52, flags: 0x0}, + 1058: {region: 0x122, script: 0xd5, flags: 0x0}, + 1059: {region: 0x164, script: 0x52, flags: 0x0}, + 1060: {region: 0x164, script: 0x52, flags: 0x0}, + 1061: {region: 0x164, script: 0x52, flags: 0x0}, + 1062: {region: 0x164, script: 0x52, flags: 0x0}, + 1063: {region: 0x26, script: 0x52, flags: 0x0}, + 1064: {region: 0x39, script: 0x5, flags: 0x1}, + 1065: {region: 0x98, script: 0xc2, flags: 0x0}, + 1066: {region: 0x115, script: 0x52, flags: 0x0}, + 1067: {region: 0x113, script: 0x52, flags: 0x0}, + 1068: {region: 0x98, script: 0x20, flags: 0x0}, + 1069: {region: 0x160, script: 0x52, flags: 0x0}, + 1070: {region: 0x164, script: 0x52, flags: 0x0}, + 1071: {region: 0x164, script: 0x52, flags: 0x0}, + 1072: {region: 0x6c, script: 0x52, flags: 0x0}, + 1073: {region: 0x160, script: 0x52, flags: 0x0}, + 1074: {region: 0x164, script: 0x52, flags: 0x0}, + 1075: {region: 0x5f, script: 0x52, flags: 0x0}, + 1076: {region: 0x94, script: 0x52, flags: 0x0}, + 1077: {region: 0x164, script: 0x52, flags: 0x0}, + 1078: {region: 0x164, script: 0x52, flags: 0x0}, + 1079: {region: 0x12e, script: 0x52, flags: 0x0}, + 1080: {region: 0x164, script: 0x52, flags: 0x0}, + 1081: {region: 0x83, script: 0x52, flags: 0x0}, + 1082: {region: 0x10b, script: 0x52, flags: 0x0}, + 1083: {region: 0x12e, script: 0x52, flags: 0x0}, + 1084: {region: 0x15e, script: 0x5, flags: 0x0}, + 1085: {region: 0x4a, script: 0x52, flags: 0x0}, + 1086: {region: 0x5f, script: 0x52, flags: 0x0}, + 1087: {region: 0x164, script: 0x52, flags: 0x0}, + 1088: {region: 0x98, script: 0x20, flags: 0x0}, + 1089: {region: 
0x94, script: 0x52, flags: 0x0}, + 1090: {region: 0x164, script: 0x52, flags: 0x0}, + 1091: {region: 0x34, script: 0xe, flags: 0x0}, + 1092: {region: 0x9a, script: 0xc5, flags: 0x0}, + 1093: {region: 0xe8, script: 0x52, flags: 0x0}, + 1094: {region: 0x98, script: 0xcd, flags: 0x0}, + 1095: {region: 0xda, script: 0x20, flags: 0x0}, + 1096: {region: 0x164, script: 0x52, flags: 0x0}, + 1097: {region: 0x164, script: 0x52, flags: 0x0}, + 1098: {region: 0x164, script: 0x52, flags: 0x0}, + 1099: {region: 0x164, script: 0x52, flags: 0x0}, + 1100: {region: 0x164, script: 0x52, flags: 0x0}, + 1101: {region: 0x164, script: 0x52, flags: 0x0}, + 1102: {region: 0x164, script: 0x52, flags: 0x0}, + 1103: {region: 0x164, script: 0x52, flags: 0x0}, + 1104: {region: 0xe6, script: 0x52, flags: 0x0}, + 1105: {region: 0x164, script: 0x52, flags: 0x0}, + 1106: {region: 0x164, script: 0x52, flags: 0x0}, + 1107: {region: 0x98, script: 0x4a, flags: 0x0}, + 1108: {region: 0x52, script: 0xcb, flags: 0x0}, + 1109: {region: 0xda, script: 0x20, flags: 0x0}, + 1110: {region: 0xda, script: 0x20, flags: 0x0}, + 1111: {region: 0x98, script: 0xd0, flags: 0x0}, + 1112: {region: 0x164, script: 0x52, flags: 0x0}, + 1113: {region: 0x111, script: 0x52, flags: 0x0}, + 1114: {region: 0x130, script: 0x52, flags: 0x0}, + 1115: {region: 0x125, script: 0x52, flags: 0x0}, + 1116: {region: 0x164, script: 0x52, flags: 0x0}, + 1117: {region: 0x3e, script: 0x3, flags: 0x1}, + 1118: {region: 0x164, script: 0x52, flags: 0x0}, + 1119: {region: 0x164, script: 0x52, flags: 0x0}, + 1120: {region: 0x164, script: 0x52, flags: 0x0}, + 1121: {region: 0x122, script: 0xd5, flags: 0x0}, + 1122: {region: 0xda, script: 0x20, flags: 0x0}, + 1123: {region: 0xda, script: 0x20, flags: 0x0}, + 1124: {region: 0xda, script: 0x20, flags: 0x0}, + 1125: {region: 0x6e, script: 0x27, flags: 0x0}, + 1126: {region: 0x164, script: 0x52, flags: 0x0}, + 1127: {region: 0x6c, script: 0x27, flags: 0x0}, + 1128: {region: 0x164, script: 0x52, flags: 0x0}, + 1129: {region: 0x164, script: 0x52, flags: 0x0}, + 1130: {region: 0x164, script: 0x52, flags: 0x0}, + 1131: {region: 0xd5, script: 0x52, flags: 0x0}, + 1132: {region: 0x126, script: 0x52, flags: 0x0}, + 1133: {region: 0x124, script: 0x52, flags: 0x0}, + 1134: {region: 0x31, script: 0x52, flags: 0x0}, + 1135: {region: 0xda, script: 0x20, flags: 0x0}, + 1136: {region: 0xe6, script: 0x52, flags: 0x0}, + 1137: {region: 0x164, script: 0x52, flags: 0x0}, + 1138: {region: 0x164, script: 0x52, flags: 0x0}, + 1139: {region: 0x31, script: 0x52, flags: 0x0}, + 1140: {region: 0xd3, script: 0x52, flags: 0x0}, + 1141: {region: 0x164, script: 0x52, flags: 0x0}, + 1142: {region: 0x160, script: 0x52, flags: 0x0}, + 1143: {region: 0x164, script: 0x52, flags: 0x0}, + 1144: {region: 0x128, script: 0x52, flags: 0x0}, + 1145: {region: 0x164, script: 0x52, flags: 0x0}, + 1146: {region: 0xcd, script: 0x52, flags: 0x0}, + 1147: {region: 0x164, script: 0x52, flags: 0x0}, + 1148: {region: 0xe5, script: 0x52, flags: 0x0}, + 1149: {region: 0x164, script: 0x52, flags: 0x0}, + 1150: {region: 0x164, script: 0x52, flags: 0x0}, + 1151: {region: 0x164, script: 0x52, flags: 0x0}, + 1152: {region: 0x12a, script: 0x52, flags: 0x0}, + 1153: {region: 0x12a, script: 0x52, flags: 0x0}, + 1154: {region: 0x12d, script: 0x52, flags: 0x0}, + 1155: {region: 0x164, script: 0x5, flags: 0x0}, + 1156: {region: 0x160, script: 0x52, flags: 0x0}, + 1157: {region: 0x86, script: 0x2d, flags: 0x0}, + 1158: {region: 0xda, script: 0x20, flags: 0x0}, + 1159: {region: 0xe6, script: 
0x52, flags: 0x0}, + 1160: {region: 0x42, script: 0xd6, flags: 0x0}, + 1161: {region: 0x164, script: 0x52, flags: 0x0}, + 1162: {region: 0x105, script: 0x1e, flags: 0x0}, + 1163: {region: 0x164, script: 0x52, flags: 0x0}, + 1164: {region: 0x164, script: 0x52, flags: 0x0}, + 1165: {region: 0x130, script: 0x52, flags: 0x0}, + 1166: {region: 0x164, script: 0x52, flags: 0x0}, + 1167: {region: 0x122, script: 0xd5, flags: 0x0}, + 1168: {region: 0x31, script: 0x52, flags: 0x0}, + 1169: {region: 0x164, script: 0x52, flags: 0x0}, + 1170: {region: 0x164, script: 0x52, flags: 0x0}, + 1171: {region: 0xcd, script: 0x52, flags: 0x0}, + 1172: {region: 0x164, script: 0x52, flags: 0x0}, + 1173: {region: 0x164, script: 0x52, flags: 0x0}, + 1174: {region: 0x12c, script: 0x52, flags: 0x0}, + 1175: {region: 0x164, script: 0x52, flags: 0x0}, + 1177: {region: 0x164, script: 0x52, flags: 0x0}, + 1178: {region: 0xd3, script: 0x52, flags: 0x0}, + 1179: {region: 0x52, script: 0xce, flags: 0x0}, + 1180: {region: 0xe4, script: 0x52, flags: 0x0}, + 1181: {region: 0x164, script: 0x52, flags: 0x0}, + 1182: {region: 0x105, script: 0x1e, flags: 0x0}, + 1183: {region: 0xb9, script: 0x52, flags: 0x0}, + 1184: {region: 0x164, script: 0x52, flags: 0x0}, + 1185: {region: 0x105, script: 0x1e, flags: 0x0}, + 1186: {region: 0x41, script: 0x4, flags: 0x1}, + 1187: {region: 0x11b, script: 0xd8, flags: 0x0}, + 1188: {region: 0x12f, script: 0x1e, flags: 0x0}, + 1189: {region: 0x74, script: 0x52, flags: 0x0}, + 1190: {region: 0x29, script: 0x52, flags: 0x0}, + 1192: {region: 0x45, script: 0x3, flags: 0x1}, + 1193: {region: 0x98, script: 0xe, flags: 0x0}, + 1194: {region: 0xe7, script: 0x5, flags: 0x0}, + 1195: {region: 0x164, script: 0x52, flags: 0x0}, + 1196: {region: 0x164, script: 0x52, flags: 0x0}, + 1197: {region: 0x164, script: 0x52, flags: 0x0}, + 1198: {region: 0x164, script: 0x52, flags: 0x0}, + 1199: {region: 0x164, script: 0x52, flags: 0x0}, + 1200: {region: 0x164, script: 0x52, flags: 0x0}, + 1201: {region: 0x164, script: 0x52, flags: 0x0}, + 1202: {region: 0x48, script: 0x4, flags: 0x1}, + 1203: {region: 0x164, script: 0x52, flags: 0x0}, + 1204: {region: 0xb3, script: 0xd9, flags: 0x0}, + 1205: {region: 0x164, script: 0x52, flags: 0x0}, + 1206: {region: 0x160, script: 0x52, flags: 0x0}, + 1207: {region: 0x9d, script: 0x52, flags: 0x0}, + 1208: {region: 0x105, script: 0x52, flags: 0x0}, + 1209: {region: 0x13d, script: 0x52, flags: 0x0}, + 1210: {region: 0x11a, script: 0x52, flags: 0x0}, + 1211: {region: 0x164, script: 0x52, flags: 0x0}, + 1212: {region: 0x35, script: 0x52, flags: 0x0}, + 1213: {region: 0x5f, script: 0x52, flags: 0x0}, + 1214: {region: 0xd0, script: 0x52, flags: 0x0}, + 1215: {region: 0x1, script: 0x52, flags: 0x0}, + 1216: {region: 0x105, script: 0x52, flags: 0x0}, + 1217: {region: 0x69, script: 0x52, flags: 0x0}, + 1218: {region: 0x12e, script: 0x52, flags: 0x0}, + 1219: {region: 0x164, script: 0x52, flags: 0x0}, + 1220: {region: 0x35, script: 0x52, flags: 0x0}, + 1221: {region: 0x4d, script: 0x52, flags: 0x0}, + 1222: {region: 0x164, script: 0x52, flags: 0x0}, + 1223: {region: 0x6e, script: 0x27, flags: 0x0}, + 1224: {region: 0x164, script: 0x52, flags: 0x0}, + 1225: {region: 0xe6, script: 0x52, flags: 0x0}, + 1226: {region: 0x2e, script: 0x52, flags: 0x0}, + 1227: {region: 0x98, script: 0xd0, flags: 0x0}, + 1228: {region: 0x98, script: 0x20, flags: 0x0}, + 1229: {region: 0x164, script: 0x52, flags: 0x0}, + 1230: {region: 0x164, script: 0x52, flags: 0x0}, + 1231: {region: 0x164, script: 0x52, flags: 0x0}, 
+ 1232: {region: 0x164, script: 0x52, flags: 0x0}, + 1233: {region: 0x164, script: 0x52, flags: 0x0}, + 1234: {region: 0x164, script: 0x52, flags: 0x0}, + 1235: {region: 0x164, script: 0x52, flags: 0x0}, + 1236: {region: 0x164, script: 0x52, flags: 0x0}, + 1237: {region: 0x164, script: 0x52, flags: 0x0}, + 1238: {region: 0x13f, script: 0x52, flags: 0x0}, + 1239: {region: 0x164, script: 0x52, flags: 0x0}, + 1240: {region: 0x164, script: 0x52, flags: 0x0}, + 1241: {region: 0xa7, script: 0x5, flags: 0x0}, + 1242: {region: 0x164, script: 0x52, flags: 0x0}, + 1243: {region: 0x113, script: 0x52, flags: 0x0}, + 1244: {region: 0x164, script: 0x52, flags: 0x0}, + 1245: {region: 0x164, script: 0x52, flags: 0x0}, + 1246: {region: 0x164, script: 0x52, flags: 0x0}, + 1247: {region: 0x164, script: 0x52, flags: 0x0}, + 1248: {region: 0x98, script: 0x20, flags: 0x0}, + 1249: {region: 0x52, script: 0x34, flags: 0x0}, + 1250: {region: 0x164, script: 0x52, flags: 0x0}, + 1251: {region: 0x164, script: 0x52, flags: 0x0}, + 1252: {region: 0x40, script: 0x52, flags: 0x0}, + 1253: {region: 0x164, script: 0x52, flags: 0x0}, + 1254: {region: 0x12a, script: 0x18, flags: 0x0}, + 1255: {region: 0x164, script: 0x52, flags: 0x0}, + 1256: {region: 0x160, script: 0x52, flags: 0x0}, + 1257: {region: 0x164, script: 0x52, flags: 0x0}, + 1258: {region: 0x12a, script: 0x5a, flags: 0x0}, + 1259: {region: 0x12a, script: 0x5b, flags: 0x0}, + 1260: {region: 0x7c, script: 0x29, flags: 0x0}, + 1261: {region: 0x52, script: 0x5e, flags: 0x0}, + 1262: {region: 0x10a, script: 0x62, flags: 0x0}, + 1263: {region: 0x107, script: 0x6c, flags: 0x0}, + 1264: {region: 0x98, script: 0x20, flags: 0x0}, + 1265: {region: 0x130, script: 0x52, flags: 0x0}, + 1266: {region: 0x164, script: 0x52, flags: 0x0}, + 1267: {region: 0x9b, script: 0x82, flags: 0x0}, + 1268: {region: 0x164, script: 0x52, flags: 0x0}, + 1269: {region: 0x15d, script: 0xba, flags: 0x0}, + 1270: {region: 0x164, script: 0x52, flags: 0x0}, + 1271: {region: 0x164, script: 0x52, flags: 0x0}, + 1272: {region: 0xda, script: 0x20, flags: 0x0}, + 1273: {region: 0x164, script: 0x52, flags: 0x0}, + 1274: {region: 0x164, script: 0x52, flags: 0x0}, + 1275: {region: 0xd0, script: 0x52, flags: 0x0}, + 1276: {region: 0x74, script: 0x52, flags: 0x0}, + 1277: {region: 0x164, script: 0x52, flags: 0x0}, + 1278: {region: 0x164, script: 0x52, flags: 0x0}, + 1279: {region: 0x51, script: 0x52, flags: 0x0}, + 1280: {region: 0x164, script: 0x52, flags: 0x0}, + 1281: {region: 0x164, script: 0x52, flags: 0x0}, + 1282: {region: 0x164, script: 0x52, flags: 0x0}, + 1283: {region: 0x51, script: 0x52, flags: 0x0}, + 1284: {region: 0x164, script: 0x52, flags: 0x0}, + 1285: {region: 0x164, script: 0x52, flags: 0x0}, + 1286: {region: 0x164, script: 0x52, flags: 0x0}, + 1287: {region: 0x164, script: 0x52, flags: 0x0}, + 1288: {region: 0x1, script: 0x37, flags: 0x0}, + 1289: {region: 0x164, script: 0x52, flags: 0x0}, + 1290: {region: 0x164, script: 0x52, flags: 0x0}, + 1291: {region: 0x164, script: 0x52, flags: 0x0}, + 1292: {region: 0x164, script: 0x52, flags: 0x0}, + 1293: {region: 0x164, script: 0x52, flags: 0x0}, + 1294: {region: 0xd5, script: 0x52, flags: 0x0}, + 1295: {region: 0x164, script: 0x52, flags: 0x0}, + 1296: {region: 0x164, script: 0x52, flags: 0x0}, + 1297: {region: 0x164, script: 0x52, flags: 0x0}, + 1298: {region: 0x40, script: 0x52, flags: 0x0}, + 1299: {region: 0x164, script: 0x52, flags: 0x0}, + 1300: {region: 0xce, script: 0x52, flags: 0x0}, + 1301: {region: 0x4c, script: 0x3, flags: 0x1}, + 
1302: {region: 0x164, script: 0x52, flags: 0x0}, + 1303: {region: 0x164, script: 0x52, flags: 0x0}, + 1304: {region: 0x164, script: 0x52, flags: 0x0}, + 1305: {region: 0x52, script: 0x52, flags: 0x0}, + 1306: {region: 0x10a, script: 0x52, flags: 0x0}, + 1308: {region: 0xa7, script: 0x5, flags: 0x0}, + 1309: {region: 0xd8, script: 0x52, flags: 0x0}, + 1310: {region: 0xb9, script: 0xd2, flags: 0x0}, + 1311: {region: 0x4f, script: 0x14, flags: 0x1}, + 1312: {region: 0x164, script: 0x52, flags: 0x0}, + 1313: {region: 0x121, script: 0x52, flags: 0x0}, + 1314: {region: 0xcf, script: 0x52, flags: 0x0}, + 1315: {region: 0x164, script: 0x52, flags: 0x0}, + 1316: {region: 0x160, script: 0x52, flags: 0x0}, + 1318: {region: 0x12a, script: 0x52, flags: 0x0}, +} + +// likelyLangList holds lists info associated with likelyLang. +// Size: 396 bytes, 99 elements +var likelyLangList = [99]likelyScriptRegion{ + 0: {region: 0x9b, script: 0x7, flags: 0x0}, + 1: {region: 0xa0, script: 0x6d, flags: 0x2}, + 2: {region: 0x11b, script: 0x78, flags: 0x2}, + 3: {region: 0x31, script: 0x52, flags: 0x0}, + 4: {region: 0x9a, script: 0x5, flags: 0x4}, + 5: {region: 0x9b, script: 0x5, flags: 0x4}, + 6: {region: 0x105, script: 0x1e, flags: 0x4}, + 7: {region: 0x9b, script: 0x5, flags: 0x2}, + 8: {region: 0x98, script: 0xe, flags: 0x0}, + 9: {region: 0x34, script: 0x16, flags: 0x2}, + 10: {region: 0x105, script: 0x1e, flags: 0x0}, + 11: {region: 0x37, script: 0x2a, flags: 0x2}, + 12: {region: 0x134, script: 0x52, flags: 0x0}, + 13: {region: 0x7a, script: 0xbd, flags: 0x2}, + 14: {region: 0x113, script: 0x52, flags: 0x0}, + 15: {region: 0x83, script: 0x1, flags: 0x2}, + 16: {region: 0x5c, script: 0x1d, flags: 0x0}, + 17: {region: 0x86, script: 0x57, flags: 0x2}, + 18: {region: 0xd5, script: 0x52, flags: 0x0}, + 19: {region: 0x51, script: 0x5, flags: 0x4}, + 20: {region: 0x10a, script: 0x5, flags: 0x4}, + 21: {region: 0xad, script: 0x1e, flags: 0x0}, + 22: {region: 0x23, script: 0x5, flags: 0x4}, + 23: {region: 0x52, script: 0x5, flags: 0x4}, + 24: {region: 0x9b, script: 0x5, flags: 0x4}, + 25: {region: 0xc4, script: 0x5, flags: 0x4}, + 26: {region: 0x52, script: 0x5, flags: 0x2}, + 27: {region: 0x12a, script: 0x52, flags: 0x0}, + 28: {region: 0xaf, script: 0x5, flags: 0x4}, + 29: {region: 0x9a, script: 0x5, flags: 0x2}, + 30: {region: 0xa4, script: 0x1e, flags: 0x0}, + 31: {region: 0x52, script: 0x5, flags: 0x4}, + 32: {region: 0x12a, script: 0x52, flags: 0x4}, + 33: {region: 0x52, script: 0x5, flags: 0x2}, + 34: {region: 0x12a, script: 0x52, flags: 0x2}, + 35: {region: 0xda, script: 0x20, flags: 0x0}, + 36: {region: 0x98, script: 0x55, flags: 0x2}, + 37: {region: 0x82, script: 0x52, flags: 0x0}, + 38: {region: 0x83, script: 0x70, flags: 0x4}, + 39: {region: 0x83, script: 0x70, flags: 0x2}, + 40: {region: 0xc4, script: 0x1e, flags: 0x0}, + 41: {region: 0x52, script: 0x66, flags: 0x4}, + 42: {region: 0x52, script: 0x66, flags: 0x2}, + 43: {region: 0xcf, script: 0x52, flags: 0x0}, + 44: {region: 0x49, script: 0x5, flags: 0x4}, + 45: {region: 0x94, script: 0x5, flags: 0x4}, + 46: {region: 0x98, script: 0x2f, flags: 0x0}, + 47: {region: 0xe7, script: 0x5, flags: 0x4}, + 48: {region: 0xe7, script: 0x5, flags: 0x2}, + 49: {region: 0x9b, script: 0x7c, flags: 0x0}, + 50: {region: 0x52, script: 0x7d, flags: 0x2}, + 51: {region: 0xb9, script: 0xd2, flags: 0x0}, + 52: {region: 0xd8, script: 0x52, flags: 0x4}, + 53: {region: 0xe7, script: 0x5, flags: 0x0}, + 54: {region: 0x98, script: 0x20, flags: 0x2}, + 55: {region: 0x98, script: 
0x47, flags: 0x2}, + 56: {region: 0x98, script: 0xc0, flags: 0x2}, + 57: {region: 0x104, script: 0x1e, flags: 0x0}, + 58: {region: 0xbc, script: 0x52, flags: 0x4}, + 59: {region: 0x103, script: 0x52, flags: 0x4}, + 60: {region: 0x105, script: 0x52, flags: 0x4}, + 61: {region: 0x12a, script: 0x52, flags: 0x4}, + 62: {region: 0x123, script: 0x1e, flags: 0x0}, + 63: {region: 0xe7, script: 0x5, flags: 0x4}, + 64: {region: 0xe7, script: 0x5, flags: 0x2}, + 65: {region: 0x52, script: 0x5, flags: 0x0}, + 66: {region: 0xad, script: 0x1e, flags: 0x4}, + 67: {region: 0xc4, script: 0x1e, flags: 0x4}, + 68: {region: 0xad, script: 0x1e, flags: 0x2}, + 69: {region: 0x98, script: 0xe, flags: 0x0}, + 70: {region: 0xda, script: 0x20, flags: 0x4}, + 71: {region: 0xda, script: 0x20, flags: 0x2}, + 72: {region: 0x136, script: 0x52, flags: 0x0}, + 73: {region: 0x23, script: 0x5, flags: 0x4}, + 74: {region: 0x52, script: 0x1e, flags: 0x4}, + 75: {region: 0x23, script: 0x5, flags: 0x2}, + 76: {region: 0x8c, script: 0x35, flags: 0x0}, + 77: {region: 0x52, script: 0x34, flags: 0x4}, + 78: {region: 0x52, script: 0x34, flags: 0x2}, + 79: {region: 0x52, script: 0x34, flags: 0x0}, + 80: {region: 0x2e, script: 0x35, flags: 0x4}, + 81: {region: 0x3d, script: 0x35, flags: 0x4}, + 82: {region: 0x7a, script: 0x35, flags: 0x4}, + 83: {region: 0x7d, script: 0x35, flags: 0x4}, + 84: {region: 0x8c, script: 0x35, flags: 0x4}, + 85: {region: 0x94, script: 0x35, flags: 0x4}, + 86: {region: 0xc5, script: 0x35, flags: 0x4}, + 87: {region: 0xcf, script: 0x35, flags: 0x4}, + 88: {region: 0xe1, script: 0x35, flags: 0x4}, + 89: {region: 0xe4, script: 0x35, flags: 0x4}, + 90: {region: 0xe6, script: 0x35, flags: 0x4}, + 91: {region: 0x115, script: 0x35, flags: 0x4}, + 92: {region: 0x122, script: 0x35, flags: 0x4}, + 93: {region: 0x12d, script: 0x35, flags: 0x4}, + 94: {region: 0x134, script: 0x35, flags: 0x4}, + 95: {region: 0x13d, script: 0x35, flags: 0x4}, + 96: {region: 0x12d, script: 0x11, flags: 0x2}, + 97: {region: 0x12d, script: 0x30, flags: 0x2}, + 98: {region: 0x12d, script: 0x35, flags: 0x2}, +} + +type likelyLangScript struct { + lang uint16 + script uint8 + flags uint8 +} + +// likelyRegion is a lookup table, indexed by regionID, for the most likely +// languages and scripts given incomplete information. If more entries exist +// for a given regionID, lang and script are the index and size respectively +// of the list in likelyRegionList. +// TODO: exclude containers and user-definable regions from the list. 
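How an entry in the likelyRegion table below is meant to be read: flags distinguish a direct (lang, script) pair from an index/size reference into likelyRegionList further down. A minimal decoding sketch, assuming the low flag bit marks the list form and using a purely illustrative helper name (neither is spelled out in the generated file):

func likelyForRegion(regionID uint16) []likelyLangScript {
    e := likelyRegion[regionID]
    if e.flags&0x1 != 0 {
        // List form (assumed): lang is the start index into likelyRegionList
        // and script is the number of entries. For example, entry 35
        // {lang: 0x0, script: 0x2, flags: 0x1} would cover likelyRegionList[0:2].
        i := int(e.lang)
        return likelyRegionList[i : i+int(e.script)]
    }
    // Direct form: the entry itself names the most likely language and script.
    return []likelyLangScript{e}
}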
+// Size: 1428 bytes, 357 elements +var likelyRegion = [357]likelyLangScript{ + 33: {lang: 0xd5, script: 0x52, flags: 0x0}, + 34: {lang: 0x39, script: 0x5, flags: 0x0}, + 35: {lang: 0x0, script: 0x2, flags: 0x1}, + 38: {lang: 0x2, script: 0x2, flags: 0x1}, + 39: {lang: 0x4, script: 0x2, flags: 0x1}, + 41: {lang: 0x3b7, script: 0x52, flags: 0x0}, + 42: {lang: 0x0, script: 0x52, flags: 0x0}, + 43: {lang: 0x139, script: 0x52, flags: 0x0}, + 44: {lang: 0x411, script: 0x52, flags: 0x0}, + 45: {lang: 0x109, script: 0x52, flags: 0x0}, + 47: {lang: 0x35e, script: 0x52, flags: 0x0}, + 48: {lang: 0x43a, script: 0x52, flags: 0x0}, + 49: {lang: 0x57, script: 0x52, flags: 0x0}, + 50: {lang: 0x6, script: 0x2, flags: 0x1}, + 52: {lang: 0xa3, script: 0xe, flags: 0x0}, + 53: {lang: 0x35e, script: 0x52, flags: 0x0}, + 54: {lang: 0x159, script: 0x52, flags: 0x0}, + 55: {lang: 0x7d, script: 0x1e, flags: 0x0}, + 56: {lang: 0x39, script: 0x5, flags: 0x0}, + 57: {lang: 0x3d0, script: 0x52, flags: 0x0}, + 58: {lang: 0x159, script: 0x52, flags: 0x0}, + 59: {lang: 0x159, script: 0x52, flags: 0x0}, + 61: {lang: 0x316, script: 0x52, flags: 0x0}, + 62: {lang: 0x139, script: 0x52, flags: 0x0}, + 63: {lang: 0x398, script: 0x52, flags: 0x0}, + 64: {lang: 0x3b7, script: 0x52, flags: 0x0}, + 66: {lang: 0x8, script: 0x2, flags: 0x1}, + 68: {lang: 0x0, script: 0x52, flags: 0x0}, + 70: {lang: 0x70, script: 0x1e, flags: 0x0}, + 72: {lang: 0x508, script: 0x37, flags: 0x2}, + 73: {lang: 0x316, script: 0x5, flags: 0x2}, + 74: {lang: 0x43b, script: 0x52, flags: 0x0}, + 75: {lang: 0x159, script: 0x52, flags: 0x0}, + 76: {lang: 0x159, script: 0x52, flags: 0x0}, + 77: {lang: 0x109, script: 0x52, flags: 0x0}, + 78: {lang: 0x159, script: 0x52, flags: 0x0}, + 80: {lang: 0x139, script: 0x52, flags: 0x0}, + 81: {lang: 0x159, script: 0x52, flags: 0x0}, + 82: {lang: 0xa, script: 0x5, flags: 0x1}, + 83: {lang: 0x139, script: 0x52, flags: 0x0}, + 84: {lang: 0x0, script: 0x52, flags: 0x0}, + 85: {lang: 0x139, script: 0x52, flags: 0x0}, + 88: {lang: 0x139, script: 0x52, flags: 0x0}, + 89: {lang: 0x3b7, script: 0x52, flags: 0x0}, + 90: {lang: 0x398, script: 0x52, flags: 0x0}, + 92: {lang: 0xf, script: 0x2, flags: 0x1}, + 93: {lang: 0xf6, script: 0x52, flags: 0x0}, + 95: {lang: 0x109, script: 0x52, flags: 0x0}, + 97: {lang: 0x1, script: 0x52, flags: 0x0}, + 98: {lang: 0xfd, script: 0x52, flags: 0x0}, + 100: {lang: 0x139, script: 0x52, flags: 0x0}, + 102: {lang: 0x11, script: 0x2, flags: 0x1}, + 103: {lang: 0x139, script: 0x52, flags: 0x0}, + 104: {lang: 0x139, script: 0x52, flags: 0x0}, + 105: {lang: 0x13b, script: 0x52, flags: 0x0}, + 106: {lang: 0x39, script: 0x5, flags: 0x0}, + 107: {lang: 0x39, script: 0x5, flags: 0x0}, + 108: {lang: 0x465, script: 0x27, flags: 0x0}, + 109: {lang: 0x139, script: 0x52, flags: 0x0}, + 110: {lang: 0x13, script: 0x2, flags: 0x1}, + 112: {lang: 0x109, script: 0x52, flags: 0x0}, + 113: {lang: 0x14c, script: 0x52, flags: 0x0}, + 114: {lang: 0x1b9, script: 0x20, flags: 0x2}, + 117: {lang: 0x153, script: 0x52, flags: 0x0}, + 119: {lang: 0x159, script: 0x52, flags: 0x0}, + 121: {lang: 0x159, script: 0x52, flags: 0x0}, + 122: {lang: 0x15, script: 0x2, flags: 0x1}, + 124: {lang: 0x17, script: 0x3, flags: 0x1}, + 125: {lang: 0x159, script: 0x52, flags: 0x0}, + 127: {lang: 0x20, script: 0x52, flags: 0x0}, + 129: {lang: 0x23d, script: 0x52, flags: 0x0}, + 131: {lang: 0x159, script: 0x52, flags: 0x0}, + 132: {lang: 0x159, script: 0x52, flags: 0x0}, + 133: {lang: 0x139, script: 0x52, flags: 0x0}, + 134: {lang: 0x1a, script: 
0x2, flags: 0x1}, + 135: {lang: 0x0, script: 0x52, flags: 0x0}, + 136: {lang: 0x139, script: 0x52, flags: 0x0}, + 138: {lang: 0x3b7, script: 0x52, flags: 0x0}, + 140: {lang: 0x51f, script: 0x35, flags: 0x0}, + 141: {lang: 0x0, script: 0x52, flags: 0x0}, + 142: {lang: 0x139, script: 0x52, flags: 0x0}, + 143: {lang: 0x1ca, script: 0x52, flags: 0x0}, + 144: {lang: 0x1cd, script: 0x52, flags: 0x0}, + 145: {lang: 0x1ce, script: 0x52, flags: 0x0}, + 147: {lang: 0x139, script: 0x52, flags: 0x0}, + 148: {lang: 0x1c, script: 0x2, flags: 0x1}, + 150: {lang: 0x1b5, script: 0x37, flags: 0x0}, + 152: {lang: 0x1e, script: 0x3, flags: 0x1}, + 154: {lang: 0x39, script: 0x5, flags: 0x0}, + 155: {lang: 0x21, script: 0x2, flags: 0x1}, + 156: {lang: 0x1f0, script: 0x52, flags: 0x0}, + 157: {lang: 0x1f1, script: 0x52, flags: 0x0}, + 160: {lang: 0x39, script: 0x5, flags: 0x0}, + 161: {lang: 0x1f8, script: 0x41, flags: 0x0}, + 163: {lang: 0x43b, script: 0x52, flags: 0x0}, + 164: {lang: 0x281, script: 0x1e, flags: 0x0}, + 165: {lang: 0x23, script: 0x3, flags: 0x1}, + 167: {lang: 0x26, script: 0x2, flags: 0x1}, + 169: {lang: 0x24b, script: 0x4b, flags: 0x0}, + 170: {lang: 0x24b, script: 0x4b, flags: 0x0}, + 171: {lang: 0x39, script: 0x5, flags: 0x0}, + 173: {lang: 0x3d9, script: 0x1e, flags: 0x0}, + 174: {lang: 0x28, script: 0x2, flags: 0x1}, + 175: {lang: 0x39, script: 0x5, flags: 0x0}, + 177: {lang: 0x109, script: 0x52, flags: 0x0}, + 178: {lang: 0x402, script: 0xc1, flags: 0x0}, + 180: {lang: 0x431, script: 0x52, flags: 0x0}, + 181: {lang: 0x2b7, script: 0x52, flags: 0x0}, + 182: {lang: 0x159, script: 0x52, flags: 0x0}, + 183: {lang: 0x2be, script: 0x52, flags: 0x0}, + 184: {lang: 0x39, script: 0x5, flags: 0x0}, + 185: {lang: 0x2a, script: 0x2, flags: 0x1}, + 186: {lang: 0x159, script: 0x52, flags: 0x0}, + 187: {lang: 0x2c, script: 0x2, flags: 0x1}, + 188: {lang: 0x428, script: 0x52, flags: 0x0}, + 189: {lang: 0x159, script: 0x52, flags: 0x0}, + 190: {lang: 0x2e8, script: 0x52, flags: 0x0}, + 193: {lang: 0x2e, script: 0x2, flags: 0x1}, + 194: {lang: 0x9e, script: 0x52, flags: 0x0}, + 195: {lang: 0x30, script: 0x2, flags: 0x1}, + 196: {lang: 0x32, script: 0x2, flags: 0x1}, + 197: {lang: 0x34, script: 0x2, flags: 0x1}, + 199: {lang: 0x159, script: 0x52, flags: 0x0}, + 200: {lang: 0x36, script: 0x2, flags: 0x1}, + 202: {lang: 0x317, script: 0x52, flags: 0x0}, + 203: {lang: 0x38, script: 0x3, flags: 0x1}, + 204: {lang: 0x124, script: 0xd4, flags: 0x0}, + 206: {lang: 0x139, script: 0x52, flags: 0x0}, + 207: {lang: 0x316, script: 0x52, flags: 0x0}, + 208: {lang: 0x3b7, script: 0x52, flags: 0x0}, + 209: {lang: 0x15, script: 0x52, flags: 0x0}, + 210: {lang: 0x159, script: 0x52, flags: 0x0}, + 211: {lang: 0x1ad, script: 0x52, flags: 0x0}, + 213: {lang: 0x1ad, script: 0x5, flags: 0x2}, + 215: {lang: 0x139, script: 0x52, flags: 0x0}, + 216: {lang: 0x35e, script: 0x52, flags: 0x0}, + 217: {lang: 0x33e, script: 0x52, flags: 0x0}, + 218: {lang: 0x348, script: 0x20, flags: 0x0}, + 224: {lang: 0x39, script: 0x5, flags: 0x0}, + 225: {lang: 0x139, script: 0x52, flags: 0x0}, + 227: {lang: 0x139, script: 0x52, flags: 0x0}, + 228: {lang: 0x159, script: 0x52, flags: 0x0}, + 229: {lang: 0x47c, script: 0x52, flags: 0x0}, + 230: {lang: 0x14e, script: 0x52, flags: 0x0}, + 231: {lang: 0x3b, script: 0x3, flags: 0x1}, + 232: {lang: 0x3e, script: 0x2, flags: 0x1}, + 233: {lang: 0x159, script: 0x52, flags: 0x0}, + 235: {lang: 0x139, script: 0x52, flags: 0x0}, + 236: {lang: 0x39, script: 0x5, flags: 0x0}, + 237: {lang: 0x3b7, script: 0x52, 
flags: 0x0}, + 239: {lang: 0x399, script: 0x52, flags: 0x0}, + 240: {lang: 0x18e, script: 0x52, flags: 0x0}, + 242: {lang: 0x39, script: 0x5, flags: 0x0}, + 257: {lang: 0x159, script: 0x52, flags: 0x0}, + 259: {lang: 0x40, script: 0x2, flags: 0x1}, + 260: {lang: 0x428, script: 0x1e, flags: 0x0}, + 261: {lang: 0x42, script: 0x2, flags: 0x1}, + 262: {lang: 0x3dc, script: 0x52, flags: 0x0}, + 263: {lang: 0x39, script: 0x5, flags: 0x0}, + 265: {lang: 0x159, script: 0x52, flags: 0x0}, + 266: {lang: 0x39, script: 0x5, flags: 0x0}, + 267: {lang: 0x44, script: 0x2, flags: 0x1}, + 270: {lang: 0x40c, script: 0x52, flags: 0x0}, + 271: {lang: 0x33e, script: 0x52, flags: 0x0}, + 272: {lang: 0x46, script: 0x2, flags: 0x1}, + 274: {lang: 0x1f1, script: 0x52, flags: 0x0}, + 275: {lang: 0x159, script: 0x52, flags: 0x0}, + 276: {lang: 0x41f, script: 0x52, flags: 0x0}, + 277: {lang: 0x35e, script: 0x52, flags: 0x0}, + 279: {lang: 0x3b7, script: 0x52, flags: 0x0}, + 281: {lang: 0x139, script: 0x52, flags: 0x0}, + 283: {lang: 0x48, script: 0x2, flags: 0x1}, + 287: {lang: 0x159, script: 0x52, flags: 0x0}, + 288: {lang: 0x159, script: 0x52, flags: 0x0}, + 289: {lang: 0x4a, script: 0x2, flags: 0x1}, + 290: {lang: 0x4c, script: 0x3, flags: 0x1}, + 291: {lang: 0x4f, script: 0x2, flags: 0x1}, + 292: {lang: 0x46d, script: 0x52, flags: 0x0}, + 293: {lang: 0x3b7, script: 0x52, flags: 0x0}, + 294: {lang: 0x46c, script: 0x52, flags: 0x0}, + 295: {lang: 0x51, script: 0x2, flags: 0x1}, + 296: {lang: 0x478, script: 0x52, flags: 0x0}, + 298: {lang: 0x53, script: 0x4, flags: 0x1}, + 300: {lang: 0x496, script: 0x52, flags: 0x0}, + 301: {lang: 0x57, script: 0x2, flags: 0x1}, + 302: {lang: 0x43b, script: 0x52, flags: 0x0}, + 303: {lang: 0x59, script: 0x3, flags: 0x1}, + 304: {lang: 0x43b, script: 0x52, flags: 0x0}, + 308: {lang: 0x508, script: 0x37, flags: 0x2}, + 309: {lang: 0x139, script: 0x52, flags: 0x0}, + 310: {lang: 0x4b2, script: 0x52, flags: 0x0}, + 311: {lang: 0x1f1, script: 0x52, flags: 0x0}, + 314: {lang: 0x139, script: 0x52, flags: 0x0}, + 317: {lang: 0x4b9, script: 0x52, flags: 0x0}, + 318: {lang: 0x89, script: 0x52, flags: 0x0}, + 319: {lang: 0x159, script: 0x52, flags: 0x0}, + 321: {lang: 0x411, script: 0x52, flags: 0x0}, + 332: {lang: 0x5c, script: 0x2, flags: 0x1}, + 349: {lang: 0x39, script: 0x5, flags: 0x0}, + 350: {lang: 0x5e, script: 0x2, flags: 0x1}, + 355: {lang: 0x419, script: 0x52, flags: 0x0}, +} + +// likelyRegionList holds lists info associated with likelyRegion. 
+// Size: 384 bytes, 96 elements +var likelyRegionList = [96]likelyLangScript{ + 0: {lang: 0x143, script: 0x5, flags: 0x0}, + 1: {lang: 0x46c, script: 0x52, flags: 0x0}, + 2: {lang: 0x427, script: 0x52, flags: 0x0}, + 3: {lang: 0x2f6, script: 0x1e, flags: 0x0}, + 4: {lang: 0x1d0, script: 0x8, flags: 0x0}, + 5: {lang: 0x26b, script: 0x52, flags: 0x0}, + 6: {lang: 0xb5, script: 0x52, flags: 0x0}, + 7: {lang: 0x428, script: 0x1e, flags: 0x0}, + 8: {lang: 0x129, script: 0xd6, flags: 0x0}, + 9: {lang: 0x348, script: 0x20, flags: 0x0}, + 10: {lang: 0x51f, script: 0x34, flags: 0x0}, + 11: {lang: 0x4a2, script: 0x5, flags: 0x0}, + 12: {lang: 0x515, script: 0x35, flags: 0x0}, + 13: {lang: 0x519, script: 0x52, flags: 0x0}, + 14: {lang: 0x291, script: 0xd5, flags: 0x0}, + 15: {lang: 0x131, script: 0x2d, flags: 0x0}, + 16: {lang: 0x480, script: 0x52, flags: 0x0}, + 17: {lang: 0x39, script: 0x5, flags: 0x0}, + 18: {lang: 0x159, script: 0x52, flags: 0x0}, + 19: {lang: 0x26, script: 0x27, flags: 0x0}, + 20: {lang: 0x134, script: 0x52, flags: 0x0}, + 21: {lang: 0x261, script: 0x5, flags: 0x2}, + 22: {lang: 0x508, script: 0x37, flags: 0x2}, + 23: {lang: 0x208, script: 0x29, flags: 0x0}, + 24: {lang: 0x5, script: 0x1e, flags: 0x0}, + 25: {lang: 0x26b, script: 0x52, flags: 0x0}, + 26: {lang: 0x131, script: 0x2d, flags: 0x0}, + 27: {lang: 0x2f6, script: 0x1e, flags: 0x0}, + 28: {lang: 0x1da, script: 0x52, flags: 0x0}, + 29: {lang: 0x316, script: 0x5, flags: 0x0}, + 30: {lang: 0x1b7, script: 0x20, flags: 0x0}, + 31: {lang: 0x4aa, script: 0x5, flags: 0x0}, + 32: {lang: 0x22e, script: 0x6b, flags: 0x0}, + 33: {lang: 0x143, script: 0x5, flags: 0x0}, + 34: {lang: 0x46c, script: 0x52, flags: 0x0}, + 35: {lang: 0x242, script: 0x46, flags: 0x0}, + 36: {lang: 0xe4, script: 0x5, flags: 0x0}, + 37: {lang: 0x21e, script: 0xd5, flags: 0x0}, + 38: {lang: 0x39, script: 0x5, flags: 0x0}, + 39: {lang: 0x159, script: 0x52, flags: 0x0}, + 40: {lang: 0x2af, script: 0x4f, flags: 0x0}, + 41: {lang: 0x21e, script: 0xd5, flags: 0x0}, + 42: {lang: 0x39, script: 0x5, flags: 0x0}, + 43: {lang: 0x159, script: 0x52, flags: 0x0}, + 44: {lang: 0x3d3, script: 0x52, flags: 0x0}, + 45: {lang: 0x4a4, script: 0x1e, flags: 0x0}, + 46: {lang: 0x2f6, script: 0x1e, flags: 0x0}, + 47: {lang: 0x427, script: 0x52, flags: 0x0}, + 48: {lang: 0x328, script: 0x6b, flags: 0x0}, + 49: {lang: 0x20b, script: 0x52, flags: 0x0}, + 50: {lang: 0x302, script: 0x1e, flags: 0x0}, + 51: {lang: 0x23a, script: 0x5, flags: 0x0}, + 52: {lang: 0x51f, script: 0x35, flags: 0x0}, + 53: {lang: 0x3b7, script: 0x52, flags: 0x0}, + 54: {lang: 0x39, script: 0x5, flags: 0x0}, + 55: {lang: 0x159, script: 0x52, flags: 0x0}, + 56: {lang: 0x2e4, script: 0x52, flags: 0x0}, + 57: {lang: 0x4aa, script: 0x5, flags: 0x0}, + 58: {lang: 0x87, script: 0x20, flags: 0x0}, + 59: {lang: 0x4aa, script: 0x5, flags: 0x0}, + 60: {lang: 0x4aa, script: 0x5, flags: 0x0}, + 61: {lang: 0xbc, script: 0x20, flags: 0x0}, + 62: {lang: 0x3aa, script: 0x52, flags: 0x0}, + 63: {lang: 0x70, script: 0x1e, flags: 0x0}, + 64: {lang: 0x3d3, script: 0x52, flags: 0x0}, + 65: {lang: 0x7d, script: 0x1e, flags: 0x0}, + 66: {lang: 0x3d9, script: 0x1e, flags: 0x0}, + 67: {lang: 0x25e, script: 0x52, flags: 0x0}, + 68: {lang: 0x43a, script: 0x52, flags: 0x0}, + 69: {lang: 0x508, script: 0x37, flags: 0x0}, + 70: {lang: 0x408, script: 0x52, flags: 0x0}, + 71: {lang: 0x4a4, script: 0x1e, flags: 0x0}, + 72: {lang: 0x39, script: 0x5, flags: 0x0}, + 73: {lang: 0x159, script: 0x52, flags: 0x0}, + 74: {lang: 0x159, script: 0x52, 
flags: 0x0}, + 75: {lang: 0x34, script: 0x5, flags: 0x0}, + 76: {lang: 0x461, script: 0xd5, flags: 0x0}, + 77: {lang: 0x2e3, script: 0x5, flags: 0x0}, + 78: {lang: 0x306, script: 0x6b, flags: 0x0}, + 79: {lang: 0x45d, script: 0x1e, flags: 0x0}, + 80: {lang: 0x143, script: 0x5, flags: 0x0}, + 81: {lang: 0x39, script: 0x5, flags: 0x0}, + 82: {lang: 0x159, script: 0x52, flags: 0x0}, + 83: {lang: 0x480, script: 0x52, flags: 0x0}, + 84: {lang: 0x57, script: 0x5, flags: 0x0}, + 85: {lang: 0x211, script: 0x1e, flags: 0x0}, + 86: {lang: 0x80, script: 0x2d, flags: 0x0}, + 87: {lang: 0x51f, script: 0x35, flags: 0x0}, + 88: {lang: 0x482, script: 0x52, flags: 0x0}, + 89: {lang: 0x4a4, script: 0x1e, flags: 0x0}, + 90: {lang: 0x508, script: 0x37, flags: 0x0}, + 91: {lang: 0x3aa, script: 0x52, flags: 0x0}, + 92: {lang: 0x427, script: 0x52, flags: 0x0}, + 93: {lang: 0x428, script: 0x1e, flags: 0x0}, + 94: {lang: 0x159, script: 0x52, flags: 0x0}, + 95: {lang: 0x43c, script: 0x5, flags: 0x0}, +} + +type likelyTag struct { + lang uint16 + region uint16 + script uint8 +} + +// Size: 192 bytes, 32 elements +var likelyRegionGroup = [32]likelyTag{ + 1: {lang: 0x134, region: 0xd5, script: 0x52}, + 2: {lang: 0x134, region: 0x134, script: 0x52}, + 3: {lang: 0x3b7, region: 0x40, script: 0x52}, + 4: {lang: 0x134, region: 0x2e, script: 0x52}, + 5: {lang: 0x134, region: 0xd5, script: 0x52}, + 6: {lang: 0x139, region: 0xce, script: 0x52}, + 7: {lang: 0x43b, region: 0x12e, script: 0x52}, + 8: {lang: 0x39, region: 0x6a, script: 0x5}, + 9: {lang: 0x43b, region: 0x4a, script: 0x52}, + 10: {lang: 0x134, region: 0x160, script: 0x52}, + 11: {lang: 0x134, region: 0x134, script: 0x52}, + 12: {lang: 0x134, region: 0x134, script: 0x52}, + 13: {lang: 0x139, region: 0x58, script: 0x52}, + 14: {lang: 0x51f, region: 0x52, script: 0x34}, + 15: {lang: 0x1b7, region: 0x98, script: 0x20}, + 16: {lang: 0x1da, region: 0x94, script: 0x52}, + 17: {lang: 0x1f1, region: 0x9d, script: 0x52}, + 18: {lang: 0x134, region: 0x2e, script: 0x52}, + 19: {lang: 0x134, region: 0xe5, script: 0x52}, + 20: {lang: 0x134, region: 0x89, script: 0x52}, + 21: {lang: 0x411, region: 0x141, script: 0x52}, + 22: {lang: 0x51f, region: 0x52, script: 0x34}, + 23: {lang: 0x4b2, region: 0x136, script: 0x52}, + 24: {lang: 0x39, region: 0x107, script: 0x5}, + 25: {lang: 0x3d9, region: 0x105, script: 0x1e}, + 26: {lang: 0x3d9, region: 0x105, script: 0x1e}, + 27: {lang: 0x134, region: 0x7a, script: 0x52}, + 28: {lang: 0x109, region: 0x5f, script: 0x52}, + 29: {lang: 0x139, region: 0x1e, script: 0x52}, + 30: {lang: 0x134, region: 0x99, script: 0x52}, + 31: {lang: 0x134, region: 0x7a, script: 0x52}, +} + +type mutualIntelligibility struct { + want uint16 + have uint16 + conf uint8 + oneway bool +} + +type scriptIntelligibility struct { + lang uint16 + want uint8 + have uint8 + conf uint8 +} + +// matchLang holds pairs of langIDs of base languages that are typically +// mutually intelligible. Each pair is associated with a confidence and +// whether the intelligibility goes one or both ways. 
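A sketch of how the matchLang pairs below could be consulted when comparing two base languages; the helper name and the handling of symmetric pairs are illustrative assumptions, not the matcher's actual lookup code:

func intelligibility(want, have uint16) (conf uint8, ok bool) {
    for _, m := range matchLang {
        if m.want == want && m.have == have {
            // The listed direction always applies.
            return m.conf, true
        }
        if !m.oneway && m.want == have && m.have == want {
            // Pairs with oneway == false apply in both directions.
            return m.conf, true
        }
    }
    return 0, false
}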
+// Size: 708 bytes, 118 elements +var matchLang = [118]mutualIntelligibility{ + 0: {want: 0x366, have: 0x33e, conf: 0x2, oneway: false}, + 1: {want: 0x26b, have: 0xe7, conf: 0x2, oneway: false}, + 2: {want: 0x1ca, have: 0xb5, conf: 0x2, oneway: false}, + 3: {want: 0x3fd, have: 0xb5, conf: 0x2, oneway: false}, + 4: {want: 0x428, have: 0xb5, conf: 0x2, oneway: false}, + 5: {want: 0x3fd, have: 0x1ca, conf: 0x2, oneway: false}, + 6: {want: 0x428, have: 0x1ca, conf: 0x2, oneway: false}, + 7: {want: 0x3fd, have: 0x428, conf: 0x2, oneway: false}, + 8: {want: 0x430, have: 0x1, conf: 0x2, oneway: false}, + 9: {want: 0x19c, have: 0x109, conf: 0x2, oneway: true}, + 10: {want: 0x28c, have: 0x109, conf: 0x2, oneway: true}, + 11: {want: 0xfd, have: 0x366, conf: 0x2, oneway: false}, + 12: {want: 0xfd, have: 0x33e, conf: 0x2, oneway: false}, + 13: {want: 0xe7, have: 0x26b, conf: 0x2, oneway: false}, + 14: {want: 0x5, have: 0x3d9, conf: 0x2, oneway: true}, + 15: {want: 0xc, have: 0x134, conf: 0x2, oneway: true}, + 16: {want: 0x15, have: 0x35e, conf: 0x2, oneway: true}, + 17: {want: 0x20, have: 0x134, conf: 0x2, oneway: true}, + 18: {want: 0x55, have: 0x139, conf: 0x2, oneway: true}, + 19: {want: 0x57, have: 0x3d9, conf: 0x2, oneway: true}, + 20: {want: 0x70, have: 0x3d9, conf: 0x2, oneway: true}, + 21: {want: 0x74, have: 0x134, conf: 0x2, oneway: true}, + 22: {want: 0x81, have: 0x1b7, conf: 0x2, oneway: true}, + 23: {want: 0xa3, have: 0x134, conf: 0x2, oneway: true}, + 24: {want: 0xb0, have: 0x159, conf: 0x2, oneway: true}, + 25: {want: 0xdb, have: 0x14e, conf: 0x2, oneway: true}, + 26: {want: 0xe3, have: 0x134, conf: 0x2, oneway: true}, + 27: {want: 0xe7, have: 0x39, conf: 0x2, oneway: true}, + 28: {want: 0xed, have: 0x159, conf: 0x2, oneway: true}, + 29: {want: 0xf5, have: 0x159, conf: 0x2, oneway: true}, + 30: {want: 0xfc, have: 0x134, conf: 0x2, oneway: true}, + 31: {want: 0x12c, have: 0x134, conf: 0x2, oneway: true}, + 32: {want: 0x137, have: 0x134, conf: 0x2, oneway: true}, + 33: {want: 0x13b, have: 0x14c, conf: 0x2, oneway: true}, + 34: {want: 0x140, have: 0x139, conf: 0x2, oneway: true}, + 35: {want: 0x153, have: 0xfd, conf: 0x2, oneway: true}, + 36: {want: 0x168, have: 0x35e, conf: 0x2, oneway: true}, + 37: {want: 0x169, have: 0x134, conf: 0x2, oneway: true}, + 38: {want: 0x16a, have: 0x134, conf: 0x2, oneway: true}, + 39: {want: 0x178, have: 0x134, conf: 0x2, oneway: true}, + 40: {want: 0x18a, have: 0x139, conf: 0x2, oneway: true}, + 41: {want: 0x18e, have: 0x139, conf: 0x2, oneway: true}, + 42: {want: 0x19d, have: 0x1b7, conf: 0x2, oneway: true}, + 43: {want: 0x1ad, have: 0x134, conf: 0x2, oneway: true}, + 44: {want: 0x1b1, have: 0x134, conf: 0x2, oneway: true}, + 45: {want: 0x1cd, have: 0x159, conf: 0x2, oneway: true}, + 46: {want: 0x1d0, have: 0x3d9, conf: 0x2, oneway: true}, + 47: {want: 0x1d2, have: 0x134, conf: 0x2, oneway: true}, + 48: {want: 0x1df, have: 0x134, conf: 0x2, oneway: true}, + 49: {want: 0x1f0, have: 0x134, conf: 0x2, oneway: true}, + 50: {want: 0x206, have: 0x1da, conf: 0x2, oneway: true}, + 51: {want: 0x208, have: 0x134, conf: 0x2, oneway: true}, + 52: {want: 0x225, have: 0x159, conf: 0x2, oneway: true}, + 53: {want: 0x23a, have: 0x3d9, conf: 0x2, oneway: true}, + 54: {want: 0x242, have: 0x134, conf: 0x2, oneway: true}, + 55: {want: 0x249, have: 0x134, conf: 0x2, oneway: true}, + 56: {want: 0x25c, have: 0x134, conf: 0x2, oneway: true}, + 57: {want: 0x26b, have: 0x480, conf: 0x2, oneway: true}, + 58: {want: 0x281, have: 0x3d9, conf: 0x2, oneway: true}, + 59: {want: 0x285, 
have: 0x1f1, conf: 0x2, oneway: true}, + 60: {want: 0x29a, have: 0x134, conf: 0x2, oneway: true}, + 61: {want: 0x2ac, have: 0x159, conf: 0x2, oneway: true}, + 62: {want: 0x2af, have: 0x134, conf: 0x2, oneway: true}, + 63: {want: 0x2b5, have: 0x134, conf: 0x2, oneway: true}, + 64: {want: 0x2ba, have: 0x159, conf: 0x2, oneway: true}, + 65: {want: 0x2e4, have: 0x134, conf: 0x2, oneway: true}, + 66: {want: 0x2e8, have: 0x159, conf: 0x2, oneway: true}, + 67: {want: 0x2f1, have: 0x134, conf: 0x2, oneway: true}, + 68: {want: 0x2f6, have: 0x7d, conf: 0x2, oneway: true}, + 69: {want: 0x2fb, have: 0x134, conf: 0x2, oneway: true}, + 70: {want: 0x302, have: 0x3d9, conf: 0x2, oneway: true}, + 71: {want: 0x312, have: 0x1b7, conf: 0x2, oneway: true}, + 72: {want: 0x316, have: 0x1da, conf: 0x2, oneway: true}, + 73: {want: 0x317, have: 0x134, conf: 0x2, oneway: true}, + 74: {want: 0x328, have: 0x134, conf: 0x2, oneway: true}, + 75: {want: 0x348, have: 0x134, conf: 0x2, oneway: true}, + 76: {want: 0x361, have: 0x33e, conf: 0x2, oneway: false}, + 77: {want: 0x361, have: 0x366, conf: 0x2, oneway: true}, + 78: {want: 0x371, have: 0x134, conf: 0x2, oneway: true}, + 79: {want: 0x37e, have: 0x134, conf: 0x2, oneway: true}, + 80: {want: 0x380, have: 0x134, conf: 0x2, oneway: true}, + 81: {want: 0x382, have: 0x159, conf: 0x2, oneway: true}, + 82: {want: 0x387, have: 0x134, conf: 0x2, oneway: true}, + 83: {want: 0x38c, have: 0x134, conf: 0x2, oneway: true}, + 84: {want: 0x394, have: 0x134, conf: 0x2, oneway: true}, + 85: {want: 0x39c, have: 0x134, conf: 0x2, oneway: true}, + 86: {want: 0x3b5, have: 0x134, conf: 0x2, oneway: true}, + 87: {want: 0x3bb, have: 0x139, conf: 0x2, oneway: true}, + 88: {want: 0x3cb, have: 0x109, conf: 0x2, oneway: true}, + 89: {want: 0x3d0, have: 0x134, conf: 0x2, oneway: true}, + 90: {want: 0x3dc, have: 0x159, conf: 0x2, oneway: true}, + 91: {want: 0x3e0, have: 0x1b7, conf: 0x2, oneway: true}, + 92: {want: 0x3f0, have: 0x134, conf: 0x2, oneway: true}, + 93: {want: 0x402, have: 0x134, conf: 0x2, oneway: true}, + 94: {want: 0x419, have: 0x134, conf: 0x2, oneway: true}, + 95: {want: 0x41f, have: 0x134, conf: 0x2, oneway: true}, + 96: {want: 0x427, have: 0x134, conf: 0x2, oneway: true}, + 97: {want: 0x431, have: 0x134, conf: 0x2, oneway: true}, + 98: {want: 0x434, have: 0x1da, conf: 0x2, oneway: true}, + 99: {want: 0x43b, have: 0x134, conf: 0x2, oneway: true}, + 100: {want: 0x446, have: 0x134, conf: 0x2, oneway: true}, + 101: {want: 0x457, have: 0x134, conf: 0x2, oneway: true}, + 102: {want: 0x45d, have: 0x3d9, conf: 0x2, oneway: true}, + 103: {want: 0x465, have: 0x134, conf: 0x2, oneway: true}, + 104: {want: 0x46c, have: 0x3d9, conf: 0x2, oneway: true}, + 105: {want: 0x3878, have: 0x134, conf: 0x2, oneway: true}, + 106: {want: 0x476, have: 0x134, conf: 0x2, oneway: true}, + 107: {want: 0x478, have: 0x134, conf: 0x2, oneway: true}, + 108: {want: 0x48a, have: 0x3d9, conf: 0x2, oneway: true}, + 109: {want: 0x493, have: 0x134, conf: 0x2, oneway: true}, + 110: {want: 0x4a2, have: 0x51f, conf: 0x2, oneway: true}, + 111: {want: 0x4aa, have: 0x134, conf: 0x2, oneway: true}, + 112: {want: 0x4b2, have: 0x3d9, conf: 0x2, oneway: true}, + 113: {want: 0x4db, have: 0x159, conf: 0x2, oneway: true}, + 114: {want: 0x4e8, have: 0x134, conf: 0x2, oneway: true}, + 115: {want: 0x508, have: 0x134, conf: 0x2, oneway: true}, + 116: {want: 0x50e, have: 0x134, conf: 0x2, oneway: true}, + 117: {want: 0x524, have: 0x134, conf: 0x2, oneway: true}, +} + +// matchScript holds pairs of scriptIDs where readers of one script 
+// can typically also read the other. Each is associated with a confidence. +// Size: 24 bytes, 4 elements +var matchScript = [4]scriptIntelligibility{ + 0: {lang: 0x428, want: 0x52, have: 0x1e, conf: 0x2}, + 1: {lang: 0x428, want: 0x1e, have: 0x52, conf: 0x2}, + 2: {lang: 0x0, want: 0x34, have: 0x35, conf: 0x1}, + 3: {lang: 0x0, want: 0x35, have: 0x34, conf: 0x1}, +} + +// Size: 128 bytes, 32 elements +var regionContainment = [32]uint32{ + 0xffffffff, 0x000007a2, 0x00003044, 0x00000008, + 0x403c0010, 0x00000020, 0x00000040, 0x00000080, + 0x00000100, 0x00000200, 0x00000400, 0x2000384c, + 0x00001000, 0x00002000, 0x00004000, 0x00008000, + 0x00010000, 0x00020000, 0x00040000, 0x00080000, + 0x00100000, 0x00200000, 0x01c1c000, 0x00800000, + 0x01000000, 0x1e020000, 0x04000000, 0x08000000, + 0x10000000, 0x20002048, 0x40000000, 0x80000000, +} + +// regionInclusion maps region identifiers to sets of regions in regionInclusionBits, +// where each set holds all groupings that are directly connected in a region +// containment graph. +// Size: 357 bytes, 357 elements +var regionInclusion = [357]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x25, 0x22, 0x23, + 0x25, 0x26, 0x21, 0x27, 0x28, 0x29, 0x2a, 0x25, + 0x2b, 0x23, 0x22, 0x25, 0x24, 0x29, 0x2c, 0x2d, + 0x23, 0x2e, 0x2c, 0x25, 0x2f, 0x30, 0x27, 0x25, + // Entry 40 - 7F + 0x27, 0x25, 0x24, 0x30, 0x21, 0x31, 0x32, 0x33, + 0x2f, 0x21, 0x26, 0x26, 0x26, 0x34, 0x2c, 0x28, + 0x27, 0x26, 0x35, 0x27, 0x21, 0x33, 0x22, 0x20, + 0x25, 0x2c, 0x25, 0x21, 0x36, 0x2d, 0x34, 0x29, + 0x21, 0x2e, 0x37, 0x25, 0x25, 0x20, 0x38, 0x38, + 0x27, 0x37, 0x38, 0x38, 0x2e, 0x39, 0x2e, 0x1f, + 0x20, 0x37, 0x3a, 0x27, 0x3b, 0x2b, 0x20, 0x29, + 0x34, 0x26, 0x37, 0x25, 0x23, 0x27, 0x2b, 0x2c, + // Entry 80 - BF + 0x22, 0x2f, 0x2c, 0x2c, 0x25, 0x26, 0x39, 0x21, + 0x33, 0x3b, 0x2c, 0x27, 0x35, 0x21, 0x33, 0x39, + 0x25, 0x2d, 0x20, 0x38, 0x30, 0x37, 0x23, 0x2b, + 0x24, 0x21, 0x23, 0x24, 0x2b, 0x39, 0x2b, 0x25, + 0x23, 0x35, 0x20, 0x2e, 0x3c, 0x30, 0x3b, 0x2e, + 0x25, 0x35, 0x35, 0x23, 0x25, 0x3c, 0x30, 0x23, + 0x25, 0x34, 0x24, 0x2c, 0x31, 0x37, 0x29, 0x37, + 0x38, 0x38, 0x34, 0x32, 0x22, 0x25, 0x2e, 0x3b, + // Entry C0 - FF + 0x20, 0x22, 0x2c, 0x30, 0x35, 0x35, 0x3b, 0x25, + 0x2c, 0x25, 0x39, 0x2e, 0x24, 0x2e, 0x33, 0x30, + 0x2e, 0x31, 0x3a, 0x2c, 0x2a, 0x2c, 0x20, 0x33, + 0x29, 0x2b, 0x24, 0x20, 0x3b, 0x23, 0x28, 0x2a, + 0x23, 0x33, 0x20, 0x27, 0x28, 0x3a, 0x30, 0x24, + 0x2d, 0x2f, 0x28, 0x25, 0x23, 0x39, 0x20, 0x3b, + 0x27, 0x20, 0x23, 0x20, 0x20, 0x1e, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + // Entry 100 - 13F + 0x20, 0x2e, 0x20, 0x2d, 0x22, 0x32, 0x2e, 0x23, + 0x3a, 0x2e, 0x38, 0x37, 0x30, 0x2c, 0x39, 0x2b, + 0x2d, 0x2c, 0x22, 0x2c, 0x2e, 0x27, 0x2e, 0x26, + 0x32, 0x33, 0x25, 0x23, 0x31, 0x21, 0x25, 0x26, + 0x21, 0x2c, 0x30, 0x3c, 0x28, 0x30, 0x3c, 0x38, + 0x28, 0x30, 0x23, 0x25, 0x28, 0x35, 0x2e, 0x32, + 0x2e, 0x20, 0x21, 0x20, 0x2f, 0x27, 0x3c, 0x22, + 0x25, 0x20, 0x27, 0x25, 0x25, 0x30, 0x3a, 0x28, + // Entry 140 - 17F + 0x20, 0x28, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x22, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x23, 0x23, 0x2e, 0x22, + 0x31, 0x2e, 0x26, 0x2e, 0x20, +} + +// regionInclusionBits is an array of bit vectors where every vector represents +// a set of 
region groupings. These sets are used to compute the distance +// between two regions for the purpose of language matching. +// Size: 288 bytes, 72 elements +var regionInclusionBits = [72]uint32{ + // Entry 0 - 1F + 0x82400813, 0x000007a3, 0x00003844, 0x20000808, + 0x403c0011, 0x00000022, 0x20000844, 0x00000082, + 0x00000102, 0x00000202, 0x00000402, 0x2000384d, + 0x00001804, 0x20002804, 0x00404000, 0x00408000, + 0x00410000, 0x02020000, 0x00040010, 0x00080010, + 0x00100010, 0x00200010, 0x01c1c001, 0x00c00000, + 0x01400000, 0x1e020001, 0x06000000, 0x0a000000, + 0x12000000, 0x20002848, 0x40000010, 0x80000001, + // Entry 20 - 3F + 0x00000001, 0x40000000, 0x00020000, 0x01000000, + 0x00008000, 0x00002000, 0x00000200, 0x00000008, + 0x00200000, 0x90000000, 0x00040000, 0x08000000, + 0x00000020, 0x84000000, 0x00000080, 0x00001000, + 0x00010000, 0x00000400, 0x04000000, 0x00000040, + 0x10000000, 0x00004000, 0x81000000, 0x88000000, + 0x00000100, 0x80020000, 0x00080000, 0x00100000, + 0x00800000, 0xffffffff, 0x82400fb3, 0xc27c0813, + // Entry 40 - 5F + 0xa240385f, 0x83c1c813, 0x9e420813, 0x92000001, + 0x86000001, 0x81400001, 0x8a000001, 0x82020001, +} + +// regionInclusionNext marks, for each entry in regionInclusionBits, the set of +// all groups that are reachable from the groups set in the respective entry. +// Size: 72 bytes, 72 elements +var regionInclusionNext = [72]uint8{ + // Entry 0 - 3F + 0x3d, 0x3e, 0x0b, 0x0b, 0x3f, 0x01, 0x0b, 0x01, + 0x01, 0x01, 0x01, 0x40, 0x0b, 0x0b, 0x16, 0x16, + 0x16, 0x19, 0x04, 0x04, 0x04, 0x04, 0x41, 0x16, + 0x16, 0x42, 0x19, 0x19, 0x19, 0x0b, 0x04, 0x00, + 0x00, 0x1e, 0x11, 0x18, 0x0f, 0x0d, 0x09, 0x03, + 0x15, 0x43, 0x12, 0x1b, 0x05, 0x44, 0x07, 0x0c, + 0x10, 0x0a, 0x1a, 0x06, 0x1c, 0x0e, 0x45, 0x46, + 0x08, 0x47, 0x13, 0x14, 0x17, 0x3d, 0x3d, 0x3d, + // Entry 40 - 7F + 0x3d, 0x3d, 0x3d, 0x42, 0x42, 0x41, 0x42, 0x42, +} + +type parentRel struct { + lang uint16 + script uint8 + maxScript uint8 + toRegion uint16 + fromRegion []uint16 +} + +// Size: 412 bytes, 5 elements +var parents = [5]parentRel{ + 0: {lang: 0x134, script: 0x0, maxScript: 0x52, toRegion: 0x1, fromRegion: []uint16{0x1a, 0x24, 0x25, 0x2e, 0x33, 0x35, 0x3c, 0x41, 0x45, 0x47, 0x48, 0x49, 0x4f, 0x51, 0x5b, 0x5c, 0x60, 0x63, 0x6c, 0x72, 0x73, 0x74, 0x7a, 0x7b, 0x7e, 0x7f, 0x80, 0x82, 0x8b, 0x8c, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9e, 0x9f, 0xa3, 0xa6, 0xa8, 0xac, 0xb0, 0xb3, 0xb4, 0xbe, 0xc5, 0xc9, 0xca, 0xcb, 0xcd, 0xcf, 0xd1, 0xd4, 0xd5, 0xdc, 0xde, 0xdf, 0xe5, 0xe6, 0xe7, 0xea, 0xef, 0x106, 0x108, 0x109, 0x10a, 0x10c, 0x10d, 0x111, 0x116, 0x11a, 0x11c, 0x11e, 0x124, 0x128, 0x12b, 0x12c, 0x12e, 0x130, 0x138, 0x13b, 0x13e, 0x141, 0x160, 0x161, 0x163}}, + 1: {lang: 0x134, script: 0x0, maxScript: 0x52, toRegion: 0x1a, fromRegion: []uint16{0x2d, 0x4d, 0x5f, 0x62, 0x71, 0xd8, 0x10b, 0x10e}}, + 2: {lang: 0x139, script: 0x0, maxScript: 0x52, toRegion: 0x1e, fromRegion: []uint16{0x2b, 0x3e, 0x40, 0x50, 0x53, 0x55, 0x58, 0x64, 0x68, 0x88, 0x8e, 0xce, 0xd7, 0xe1, 0xe3, 0xeb, 0xf0, 0x119, 0x134, 0x135, 0x13a}}, + 3: {lang: 0x3b7, script: 0x0, maxScript: 0x52, toRegion: 0xed, fromRegion: []uint16{0x29, 0x4d, 0x59, 0x85, 0x8a, 0xb6, 0xc5, 0xd0, 0x117, 0x125}}, + 4: {lang: 0x51f, script: 0x35, maxScript: 0x35, toRegion: 0x8c, fromRegion: []uint16{0xc5}}, +} + +// Total table size 25825 bytes (25KiB); checksum: 4E97CC5E diff --git a/vendor/golang.org/x/text/language/tags.go b/vendor/golang.org/x/text/language/tags.go new file mode 100644 index 000000000..de30155a2 --- /dev/null +++ 
b/vendor/golang.org/x/text/language/tags.go @@ -0,0 +1,143 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// TODO: Various sets of commonly use tags and regions. + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func MustParse(s string) Tag { + t, err := Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func (c CanonType) MustParse(s string) Tag { + t, err := c.Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParseBase is like ParseBase, but panics if the given base cannot be parsed. +// It simplifies safe initialization of Base values. +func MustParseBase(s string) Base { + b, err := ParseBase(s) + if err != nil { + panic(err) + } + return b +} + +// MustParseScript is like ParseScript, but panics if the given script cannot be +// parsed. It simplifies safe initialization of Script values. +func MustParseScript(s string) Script { + scr, err := ParseScript(s) + if err != nil { + panic(err) + } + return scr +} + +// MustParseRegion is like ParseRegion, but panics if the given region cannot be +// parsed. It simplifies safe initialization of Region values. +func MustParseRegion(s string) Region { + r, err := ParseRegion(s) + if err != nil { + panic(err) + } + return r +} + +var ( + und = Tag{} + + Und Tag = Tag{} + + Afrikaans Tag = Tag{lang: _af} // af + Amharic Tag = Tag{lang: _am} // am + Arabic Tag = Tag{lang: _ar} // ar + ModernStandardArabic Tag = Tag{lang: _ar, region: _001} // ar-001 + Azerbaijani Tag = Tag{lang: _az} // az + Bulgarian Tag = Tag{lang: _bg} // bg + Bengali Tag = Tag{lang: _bn} // bn + Catalan Tag = Tag{lang: _ca} // ca + Czech Tag = Tag{lang: _cs} // cs + Danish Tag = Tag{lang: _da} // da + German Tag = Tag{lang: _de} // de + Greek Tag = Tag{lang: _el} // el + English Tag = Tag{lang: _en} // en + AmericanEnglish Tag = Tag{lang: _en, region: _US} // en-US + BritishEnglish Tag = Tag{lang: _en, region: _GB} // en-GB + Spanish Tag = Tag{lang: _es} // es + EuropeanSpanish Tag = Tag{lang: _es, region: _ES} // es-ES + LatinAmericanSpanish Tag = Tag{lang: _es, region: _419} // es-419 + Estonian Tag = Tag{lang: _et} // et + Persian Tag = Tag{lang: _fa} // fa + Finnish Tag = Tag{lang: _fi} // fi + Filipino Tag = Tag{lang: _fil} // fil + French Tag = Tag{lang: _fr} // fr + CanadianFrench Tag = Tag{lang: _fr, region: _CA} // fr-CA + Gujarati Tag = Tag{lang: _gu} // gu + Hebrew Tag = Tag{lang: _he} // he + Hindi Tag = Tag{lang: _hi} // hi + Croatian Tag = Tag{lang: _hr} // hr + Hungarian Tag = Tag{lang: _hu} // hu + Armenian Tag = Tag{lang: _hy} // hy + Indonesian Tag = Tag{lang: _id} // id + Icelandic Tag = Tag{lang: _is} // is + Italian Tag = Tag{lang: _it} // it + Japanese Tag = Tag{lang: _ja} // ja + Georgian Tag = Tag{lang: _ka} // ka + Kazakh Tag = Tag{lang: _kk} // kk + Khmer Tag = Tag{lang: _km} // km + Kannada Tag = Tag{lang: _kn} // kn + Korean Tag = Tag{lang: _ko} // ko + Kirghiz Tag = Tag{lang: _ky} // ky + Lao Tag = Tag{lang: _lo} // lo + Lithuanian Tag = Tag{lang: _lt} // lt + Latvian Tag = Tag{lang: _lv} // lv + Macedonian Tag = Tag{lang: _mk} // mk + Malayalam Tag = Tag{lang: _ml} // ml + Mongolian Tag = Tag{lang: _mn} // mn + Marathi Tag = Tag{lang: _mr} // mr + Malay Tag = 
Tag{lang: _ms} // ms + Burmese Tag = Tag{lang: _my} // my + Nepali Tag = Tag{lang: _ne} // ne + Dutch Tag = Tag{lang: _nl} // nl + Norwegian Tag = Tag{lang: _no} // no + Punjabi Tag = Tag{lang: _pa} // pa + Polish Tag = Tag{lang: _pl} // pl + Portuguese Tag = Tag{lang: _pt} // pt + BrazilianPortuguese Tag = Tag{lang: _pt, region: _BR} // pt-BR + EuropeanPortuguese Tag = Tag{lang: _pt, region: _PT} // pt-PT + Romanian Tag = Tag{lang: _ro} // ro + Russian Tag = Tag{lang: _ru} // ru + Sinhala Tag = Tag{lang: _si} // si + Slovak Tag = Tag{lang: _sk} // sk + Slovenian Tag = Tag{lang: _sl} // sl + Albanian Tag = Tag{lang: _sq} // sq + Serbian Tag = Tag{lang: _sr} // sr + SerbianLatin Tag = Tag{lang: _sr, script: _Latn} // sr-Latn + Swedish Tag = Tag{lang: _sv} // sv + Swahili Tag = Tag{lang: _sw} // sw + Tamil Tag = Tag{lang: _ta} // ta + Telugu Tag = Tag{lang: _te} // te + Thai Tag = Tag{lang: _th} // th + Turkish Tag = Tag{lang: _tr} // tr + Ukrainian Tag = Tag{lang: _uk} // uk + Urdu Tag = Tag{lang: _ur} // ur + Uzbek Tag = Tag{lang: _uz} // uz + Vietnamese Tag = Tag{lang: _vi} // vi + Chinese Tag = Tag{lang: _zh} // zh + SimplifiedChinese Tag = Tag{lang: _zh, script: _Hans} // zh-Hans + TraditionalChinese Tag = Tag{lang: _zh, script: _Hant} // zh-Hant + Zulu Tag = Tag{lang: _zu} // zu +) diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go new file mode 100644 index 000000000..df7aa02db --- /dev/null +++ b/vendor/golang.org/x/text/runes/cond.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runes + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is. +// This is done for various reasons: +// - To retain the semantics of the Nop transformer: if input is passed to a Nop +// one would expect it to be unchanged. +// - It would be very expensive to pass a converted RuneError to a transformer: +// a transformer might need more source bytes after RuneError, meaning that +// the only way to pass it safely is to create a new buffer and manage the +// intermingling of RuneErrors and normal input. +// - Many transformers leave ill-formed UTF-8 as is, so this is not +// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a +// logical consequence of the operation (as for Map) or if it otherwise would +// pose security concerns (as for Remove). +// - An alternative would be to return an error on ill-formed UTF-8, but this +// would be inconsistent with other operations. + +// If returns a transformer that applies tIn to consecutive runes for which +// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset +// is called on tIn and tNotIn at the start of each run. A Nop transformer will +// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated +// to RuneError to determine which transformer to apply, but is passed as is to +// the respective transformer. 
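A small caller-side example of If, using only functions defined in this package and in x/text/transform; the sample input and expected output are illustrative:

package main

import (
    "fmt"
    "unicode"

    "golang.org/x/text/runes"
    "golang.org/x/text/transform"
)

func main() {
    // Uppercase only Latin-script runes; the nil tNotIn falls back to Nop,
    // so Greek letters, digits and spaces pass through unchanged.
    t := runes.If(runes.In(unicode.Latin), runes.Map(unicode.ToUpper), nil)
    out, _, err := transform.String(t, "abc δεφ 123")
    fmt.Println(out, err) // expected: "ABC δεφ 123" <nil>
}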
+func If(s Set, tIn, tNotIn transform.Transformer) Transformer { + if tIn == nil && tNotIn == nil { + return Transformer{transform.Nop} + } + if tIn == nil { + tIn = transform.Nop + } + if tNotIn == nil { + tNotIn = transform.Nop + } + sIn, ok := tIn.(transform.SpanningTransformer) + if !ok { + sIn = dummySpan{tIn} + } + sNotIn, ok := tNotIn.(transform.SpanningTransformer) + if !ok { + sNotIn = dummySpan{tNotIn} + } + + a := &cond{ + tIn: sIn, + tNotIn: sNotIn, + f: s.Contains, + } + a.Reset() + return Transformer{a} +} + +type dummySpan struct{ transform.Transformer } + +func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) { + return 0, transform.ErrEndOfSpan +} + +type cond struct { + tIn, tNotIn transform.SpanningTransformer + f func(rune) bool + check func(rune) bool // current check to perform + t transform.SpanningTransformer // current transformer to use +} + +// Reset implements transform.Transformer. +func (t *cond) Reset() { + t.check = t.is + t.t = t.tIn + t.t.Reset() // notIn will be reset on first usage. +} + +func (t *cond) is(r rune) bool { + if t.f(r) { + return true + } + t.check = t.isNot + t.t = t.tNotIn + t.tNotIn.Reset() + return false +} + +func (t *cond) isNot(r rune) bool { + if !t.f(r) { + return true + } + t.check = t.is + t.t = t.tIn + t.tIn.Reset() + return false +} + +// This implementation of Span doesn't help all too much, but it needs to be +// there to satisfy this package's Transformer interface. +// TODO: there are certainly room for improvements, though. For example, if +// t.t == transform.Nop (which will a common occurrence) it will save a bundle +// to special-case that loop. +func (t *cond) Span(src []byte, atEOF bool) (n int, err error) { + p := 0 + for n < len(src) && err == nil { + // Don't process too much at a time as the Spanner that will be + // called on this block may terminate early. + const maxChunk = 4096 + max := len(src) + if v := n + maxChunk; v < max { + max = v + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + r := rune(src[p]) + if r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. + atEnd = true + break + } + } + n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src))) + n += n2 + if err2 != nil { + return n, err2 + } + // At this point either err != nil or t.check will pass for the rune at p. + p = n + size + } + return n, err +} + +func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + p := 0 + for nSrc < len(src) && err == nil { + // Don't process too much at a time, as the work might be wasted if the + // destination buffer isn't large enough to hold the result or a + // transform returns an error early. + const maxChunk = 4096 + max := len(src) + if n := nSrc + maxChunk; n < len(src) { + max = n + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + r := rune(src[p]) + if r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. 
+ atEnd = true + break + } + } + nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src))) + nDst += nDst2 + nSrc += nSrc2 + if err2 != nil { + return nDst, nSrc, err2 + } + // At this point either err != nil or t.check will pass for the rune at p. + p = nSrc + size + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go new file mode 100644 index 000000000..71933696f --- /dev/null +++ b/vendor/golang.org/x/text/runes/runes.go @@ -0,0 +1,355 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package runes provide transforms for UTF-8 encoded text. +package runes // import "golang.org/x/text/runes" + +import ( + "unicode" + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// A Set is a collection of runes. +type Set interface { + // Contains returns true if r is contained in the set. + Contains(r rune) bool +} + +type setFunc func(rune) bool + +func (s setFunc) Contains(r rune) bool { + return s(r) +} + +// Note: using funcs here instead of wrapping types result in cleaner +// documentation and a smaller API. + +// In creates a Set with a Contains method that returns true for all runes in +// the given RangeTable. +func In(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return unicode.Is(rt, r) }) +} + +// In creates a Set with a Contains method that returns true for all runes not +// in the given RangeTable. +func NotIn(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return !unicode.Is(rt, r) }) +} + +// Predicate creates a Set with a Contains method that returns f(r). +func Predicate(f func(rune) bool) Set { + return setFunc(f) +} + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + t transform.SpanningTransformer +} + +func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) { + return t.t.Span(b, atEOF) +} + +func (t Transformer) Reset() { t.t.Reset() } + +// Bytes returns a new byte slice with the result of converting b using t. It +// calls Reset on t. It returns nil if any error was found. This can only happen +// if an error-producing Transformer is passed to If. +func (t Transformer) Bytes(b []byte) []byte { + b, _, err := transform.Bytes(t, b) + if err != nil { + return nil + } + return b +} + +// String returns a string with the result of converting s using t. It calls +// Reset on t. It returns the empty string if any error was found. This can only +// happen if an error-producing Transformer is passed to If. +func (t Transformer) String(s string) string { + s, _, err := transform.String(t, s) + if err != nil { + return "" + } + return s +} + +// TODO: +// - Copy: copying strings and bytes in whole-rune units. +// - Validation (maybe) +// - Well-formed-ness (maybe) + +const runeErrorString = string(utf8.RuneError) + +// Remove returns a Transformer that removes runes r for which s.Contains(r). +// Illegal input bytes are replaced by RuneError before being passed to f. +func Remove(s Set) Transformer { + if f, ok := s.(setFunc); ok { + // This little trick cuts the running time of BenchmarkRemove for sets + // created by Predicate roughly in half. + // TODO: special-case RangeTables as well. 
+ return Transformer{remove(f)} + } + return Transformer{remove(s.Contains)} +} + +// TODO: remove transform.RemoveFunc. + +type remove func(r rune) bool + +func (remove) Reset() {} + +// Span implements transform.Spanner. +func (t remove) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) { + err = transform.ErrEndOfSpan + break + } + n += size + } + return +} + +// Transform implements transform.Transformer. +func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. + if !t(utf8.RuneError) { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + } + nSrc++ + continue + } + if t(r) { + nSrc += size + continue + } + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + } + return +} + +// Map returns a Transformer that maps the runes in the input using the given +// mapping. Illegal bytes in the input are converted to utf8.RuneError before +// being passed to the mapping func. +func Map(mapping func(rune) rune) Transformer { + return Transformer{mapper(mapping)} +} + +type mapper func(rune) rune + +func (mapper) Reset() {} + +// Span implements transform.Spanner. +func (t mapper) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); n += size { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) != r { + err = transform.ErrEndOfSpan + break + } + } + return n, err +} + +// Transform implements transform.Transformer. +func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + var replacement rune + var b [utf8.UTFMax]byte + + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + if replacement = t(r); replacement < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = byte(replacement) + nDst++ + nSrc++ + continue + } + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. 
+ if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + if replacement = t(utf8.RuneError); replacement == utf8.RuneError { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + continue + } + } else if replacement = t(r); replacement == r { + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + continue + } + + n := utf8.EncodeRune(b[:], replacement) + + if nDst+n > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < n; i++ { + dst[nDst] = b[i] + nDst++ + } + nSrc += size + } + return +} + +// ReplaceIllFormed returns a transformer that replaces all input bytes that are +// not part of a well-formed UTF-8 code sequence with utf8.RuneError. +func ReplaceIllFormed() Transformer { + return Transformer{&replaceIllFormed{}} +} + +type replaceIllFormed struct{ transform.NopResetter } + +func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // ASCII fast path. + if src[n] < utf8.RuneSelf { + n++ + continue + } + + r, size := utf8.DecodeRune(src[n:]) + + // Look for a valid non-ASCII rune. + if r != utf8.RuneError || size != 1 { + n += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + err = transform.ErrEndOfSpan + break + } + return n, err +} + +func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // ASCII fast path. + if r := src[nSrc]; r < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = r + nDst++ + nSrc++ + continue + } + + // Look for a valid non-ASCII rune. + if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + err = transform.ErrShortDst + break + } + nDst += size + nSrc += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/secure/precis/class.go b/vendor/golang.org/x/text/secure/precis/class.go new file mode 100644 index 000000000..f6b56413b --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/class.go @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package precis + +import ( + "unicode/utf8" +) + +// TODO: Add contextual character rules from Appendix A of RFC5892. + +// A class is a set of characters that match certain derived properties. The +// PRECIS framework defines two classes: The Freeform class and the Identifier +// class. The freeform class should be used for profiles where expressiveness is +// prioritized over safety such as nicknames or passwords. The identifier class +// should be used for profiles where safety is the first priority such as +// addressable network labels and usernames. 
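How the two classes surface to callers: the package ships pre-defined profiles for usernames (identifier class) and for nicknames and passwords (freeform class). A usage sketch, assuming the conventional profile names UsernameCaseMapped and OpaqueString, which are defined elsewhere in this package rather than in this file:

package main

import (
    "fmt"

    "golang.org/x/text/secure/precis"
)

func main() {
    // Identifier class: case-mapped, strictly validated usernames.
    user, err := precis.UsernameCaseMapped.String("Ренат")
    fmt.Println(user, err) // expected: "ренат" <nil>

    // Freeform class: a much wider repertoire, e.g. passwords.
    pass, err := precis.OpaqueString.String("correct horse battery staple")
    fmt.Println(pass, err)
}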
+type class struct { + validFrom property +} + +// Contains satisfies the runes.Set interface and returns whether the given rune +// is a member of the class. +func (c class) Contains(r rune) bool { + b := make([]byte, 4) + n := utf8.EncodeRune(b, r) + + trieval, _ := dpTrie.lookup(b[:n]) + return c.validFrom <= property(trieval) +} + +var ( + identifier = &class{validFrom: pValid} + freeform = &class{validFrom: idDisOrFreePVal} +) diff --git a/vendor/golang.org/x/text/secure/precis/context.go b/vendor/golang.org/x/text/secure/precis/context.go new file mode 100644 index 000000000..2dcaf29d7 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/context.go @@ -0,0 +1,139 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package precis + +import "errors" + +// This file contains tables and code related to context rules. + +type catBitmap uint16 + +const ( + // These bits, once set depending on the current value, are never unset. + bJapanese catBitmap = 1 << iota + bArabicIndicDigit + bExtendedArabicIndicDigit + + // These bits are set on each iteration depending on the current value. + bJoinStart + bJoinMid + bJoinEnd + bVirama + bLatinSmallL + bGreek + bHebrew + + // These bits indicated which of the permanent bits need to be set at the + // end of the checks. + bMustHaveJapn + + permanent = bJapanese | bArabicIndicDigit | bExtendedArabicIndicDigit | bMustHaveJapn +) + +const finalShift = 10 + +var errContext = errors.New("precis: contextual rule violated") + +func init() { + // Programmatically set these required bits as, manually setting them seems + // too error prone. + for i, ct := range categoryTransitions { + categoryTransitions[i].keep |= permanent + categoryTransitions[i].accept |= ct.term + } +} + +var categoryTransitions = []struct { + keep catBitmap // mask selecting which bits to keep from the previous state + set catBitmap // mask for which bits to set for this transition + + // These bitmaps are used for rules that require lookahead. + // term&accept == term must be true, which is enforced programmatically. + term catBitmap // bits accepted as termination condition + accept catBitmap // bits that pass, but not sufficient as termination + + // The rule function cannot take a *context as an argument, as it would + // cause the context to escape, adding significant overhead. 
+ rule func(beforeBits catBitmap) (doLookahead bool, err error) +}{ + joiningL: {set: bJoinStart}, + joiningD: {set: bJoinStart | bJoinEnd}, + joiningT: {keep: bJoinStart, set: bJoinMid}, + joiningR: {set: bJoinEnd}, + viramaModifier: {set: bVirama}, + viramaJoinT: {set: bVirama | bJoinMid}, + latinSmallL: {set: bLatinSmallL}, + greek: {set: bGreek}, + greekJoinT: {set: bGreek | bJoinMid}, + hebrew: {set: bHebrew}, + hebrewJoinT: {set: bHebrew | bJoinMid}, + japanese: {set: bJapanese}, + katakanaMiddleDot: {set: bMustHaveJapn}, + + zeroWidthNonJoiner: { + term: bJoinEnd, + accept: bJoinMid, + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bVirama != 0 { + return false, nil + } + if before&bJoinStart == 0 { + return false, errContext + } + return true, nil + }, + }, + zeroWidthJoiner: { + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bVirama == 0 { + err = errContext + } + return false, err + }, + }, + middleDot: { + term: bLatinSmallL, + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bLatinSmallL == 0 { + return false, errContext + } + return true, nil + }, + }, + greekLowerNumeralSign: { + set: bGreek, + term: bGreek, + rule: func(before catBitmap) (doLookAhead bool, err error) { + return true, nil + }, + }, + hebrewPreceding: { + set: bHebrew, + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bHebrew == 0 { + err = errContext + } + return false, err + }, + }, + arabicIndicDigit: { + set: bArabicIndicDigit, + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bExtendedArabicIndicDigit != 0 { + err = errContext + } + return false, err + }, + }, + extendedArabicIndicDigit: { + set: bExtendedArabicIndicDigit, + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bArabicIndicDigit != 0 { + err = errContext + } + return false, err + }, + }, +} diff --git a/vendor/golang.org/x/text/secure/precis/doc.go b/vendor/golang.org/x/text/secure/precis/doc.go new file mode 100644 index 000000000..48500fe1c --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/doc.go @@ -0,0 +1,14 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package precis contains types and functions for the preparation, +// enforcement, and comparison of internationalized strings ("PRECIS") as +// defined in RFC 7564. It also contains several pre-defined profiles for +// passwords, nicknames, and usernames as defined in RFC 7613 and RFC 7700. +// +// BE ADVISED: This package is under construction and the API may change in +// backwards incompatible ways and without notice. +package precis // import "golang.org/x/text/secure/precis" + +//go:generate go run gen.go gen_trieval.go diff --git a/vendor/golang.org/x/text/secure/precis/nickname.go b/vendor/golang.org/x/text/secure/precis/nickname.go new file mode 100644 index 000000000..cd54b9e69 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/nickname.go @@ -0,0 +1,70 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package precis + +import ( + "unicode" + "unicode/utf8" + + "golang.org/x/text/transform" +) + +type nickAdditionalMapping struct { + // TODO: This transformer needs to be stateless somehow… + notStart bool + prevSpace bool +} + +func (t *nickAdditionalMapping) Reset() { + t.prevSpace = false + t.notStart = false +} + +func (t *nickAdditionalMapping) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + // RFC 7700 §2.1. Rules + // + // 2. Additional Mapping Rule: The additional mapping rule consists of + // the following sub-rules. + // + // 1. Any instances of non-ASCII space MUST be mapped to ASCII + // space (U+0020); a non-ASCII space is any Unicode code point + // having a general category of "Zs", naturally with the + // exception of U+0020. + // + // 2. Any instances of the ASCII space character at the beginning + // or end of a nickname MUST be removed (e.g., "stpeter " is + // mapped to "stpeter"). + // + // 3. Interior sequences of more than one ASCII space character + // MUST be mapped to a single ASCII space character (e.g., + // "St Peter" is mapped to "St Peter"). + + for nSrc < len(src) { + r, size := utf8.DecodeRune(src[nSrc:]) + if size == 0 { // Incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 + } + if unicode.Is(unicode.Zs, r) { + t.prevSpace = true + } else { + if t.prevSpace && t.notStart { + dst[nDst] = ' ' + nDst += 1 + } + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + nDst += size + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + t.prevSpace = false + t.notStart = true + } + nSrc += size + } + return nDst, nSrc, nil +} diff --git a/vendor/golang.org/x/text/secure/precis/options.go b/vendor/golang.org/x/text/secure/precis/options.go new file mode 100644 index 000000000..488f0b1f7 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/options.go @@ -0,0 +1,153 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package precis + +import ( + "golang.org/x/text/cases" + "golang.org/x/text/language" + "golang.org/x/text/runes" + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// An Option is used to define the behavior and rules of a Profile. +type Option func(*options) + +type options struct { + // Preparation options + foldWidth bool + + // Enforcement options + asciiLower bool + cases transform.SpanningTransformer + disallow runes.Set + norm transform.SpanningTransformer + additional []func() transform.SpanningTransformer + width transform.SpanningTransformer + disallowEmpty bool + bidiRule bool + + // Comparison options + ignorecase bool +} + +func getOpts(o ...Option) (res options) { + for _, f := range o { + f(&res) + } + // Using a SpanningTransformer, instead of norm.Form prevents an allocation + // down the road. + if res.norm == nil { + res.norm = norm.NFC + } + return +} + +var ( + // The IgnoreCase option causes the profile to perform a case insensitive + // comparison during the PRECIS comparison step. + IgnoreCase Option = ignoreCase + + // The FoldWidth option causes the profile to map non-canonical wide and + // narrow variants to their decomposition mapping. This is useful for + // profiles that are based on the identifier class which would otherwise + // disallow such characters. 
+ FoldWidth Option = foldWidth + + // The DisallowEmpty option causes the enforcement step to return an error if + // the resulting string would be empty. + DisallowEmpty Option = disallowEmpty + + // The BidiRule option causes the Bidi Rule defined in RFC 5893 to be + // applied. + BidiRule Option = bidiRule +) + +var ( + ignoreCase = func(o *options) { + o.ignorecase = true + } + foldWidth = func(o *options) { + o.foldWidth = true + } + disallowEmpty = func(o *options) { + o.disallowEmpty = true + } + bidiRule = func(o *options) { + o.bidiRule = true + } +) + +// TODO: move this logic to package transform + +type spanWrap struct{ transform.Transformer } + +func (s spanWrap) Span(src []byte, atEOF bool) (n int, err error) { + return 0, transform.ErrEndOfSpan +} + +// TODO: allow different types? For instance: +// func() transform.Transformer +// func() transform.SpanningTransformer +// func([]byte) bool // validation only +// +// Also, would be great if we could detect if a transformer is reentrant. + +// The AdditionalMapping option defines the additional mapping rule for the +// Profile by applying Transformer's in sequence. +func AdditionalMapping(t ...func() transform.Transformer) Option { + return func(o *options) { + for _, f := range t { + sf := func() transform.SpanningTransformer { + return f().(transform.SpanningTransformer) + } + if _, ok := f().(transform.SpanningTransformer); !ok { + sf = func() transform.SpanningTransformer { + return spanWrap{f()} + } + } + o.additional = append(o.additional, sf) + } + } +} + +// The Norm option defines a Profile's normalization rule. Defaults to NFC. +func Norm(f norm.Form) Option { + return func(o *options) { + o.norm = f + } +} + +// The FoldCase option defines a Profile's case mapping rule. Options can be +// provided to determine the type of case folding used. +func FoldCase(opts ...cases.Option) Option { + return func(o *options) { + o.asciiLower = true + o.cases = cases.Fold(opts...) + } +} + +// The LowerCase option defines a Profile's case mapping rule. Options can be +// provided to determine the type of case folding used. +func LowerCase(opts ...cases.Option) Option { + return func(o *options) { + o.asciiLower = true + if len(opts) == 0 { + o.cases = cases.Lower(language.Und, cases.HandleFinalSigma(false)) + return + } + + opts = append([]cases.Option{cases.HandleFinalSigma(false)}, opts...) + o.cases = cases.Lower(language.Und, opts...) + } +} + +// The Disallow option further restricts a Profile's allowed characters beyond +// what is disallowed by the underlying string class. +func Disallow(set runes.Set) Option { + return func(o *options) { + o.disallow = set + } +} diff --git a/vendor/golang.org/x/text/secure/precis/profile.go b/vendor/golang.org/x/text/secure/precis/profile.go new file mode 100644 index 000000000..1d7898d47 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/profile.go @@ -0,0 +1,388 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package precis + +import ( + "bytes" + "errors" + "unicode/utf8" + + "golang.org/x/text/cases" + "golang.org/x/text/language" + "golang.org/x/text/runes" + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/transform" + "golang.org/x/text/width" +) + +var ( + errDisallowedRune = errors.New("precis: disallowed rune encountered") +) + +var dpTrie = newDerivedPropertiesTrie(0) + +// A Profile represents a set of rules for normalizing and validating strings in +// the PRECIS framework. +type Profile struct { + options + class *class +} + +// NewIdentifier creates a new PRECIS profile based on the Identifier string +// class. Profiles created from this class are suitable for use where safety is +// prioritized over expressiveness like network identifiers, user accounts, chat +// rooms, and file names. +func NewIdentifier(opts ...Option) *Profile { + return &Profile{ + options: getOpts(opts...), + class: identifier, + } +} + +// NewFreeform creates a new PRECIS profile based on the Freeform string class. +// Profiles created from this class are suitable for use where expressiveness is +// prioritized over safety like passwords, and display-elements such as +// nicknames in a chat room. +func NewFreeform(opts ...Option) *Profile { + return &Profile{ + options: getOpts(opts...), + class: freeform, + } +} + +// NewTransformer creates a new transform.Transformer that performs the PRECIS +// preparation and enforcement steps on the given UTF-8 encoded bytes. +func (p *Profile) NewTransformer() *Transformer { + var ts []transform.Transformer + + // These transforms are applied in the order defined in + // https://tools.ietf.org/html/rfc7564#section-7 + + if p.options.foldWidth { + ts = append(ts, width.Fold) + } + + for _, f := range p.options.additional { + ts = append(ts, f()) + } + + if p.options.cases != nil { + ts = append(ts, p.options.cases) + } + + ts = append(ts, p.options.norm) + + if p.options.bidiRule { + ts = append(ts, bidirule.New()) + } + + ts = append(ts, &checker{p: p, allowed: p.Allowed()}) + + // TODO: Add the disallow empty rule with a dummy transformer? + + return &Transformer{transform.Chain(ts...)} +} + +var errEmptyString = errors.New("precis: transformation resulted in empty string") + +type buffers struct { + src []byte + buf [2][]byte + next int +} + +func (b *buffers) apply(t transform.SpanningTransformer) (err error) { + n, err := t.Span(b.src, true) + if err != transform.ErrEndOfSpan { + return err + } + x := b.next & 1 + if b.buf[x] == nil { + b.buf[x] = make([]byte, 0, 8+len(b.src)+len(b.src)>>2) + } + span := append(b.buf[x][:0], b.src[:n]...) + b.src, _, err = transform.Append(t, span, b.src[n:]) + b.buf[x] = b.src + b.next++ + return err +} + +// Pre-allocate transformers when possible. In some cases this avoids allocation. +var ( + foldWidthT transform.SpanningTransformer = width.Fold + lowerCaseT transform.SpanningTransformer = cases.Lower(language.Und, cases.HandleFinalSigma(false)) +) + +// TODO: make this a method on profile. + +func (b *buffers) enforce(p *Profile, src []byte, comparing bool) (str []byte, err error) { + b.src = src + + ascii := true + for _, c := range src { + if c >= utf8.RuneSelf { + ascii = false + break + } + } + // ASCII fast path. 
+ if ascii { + for _, f := range p.options.additional { + if err = b.apply(f()); err != nil { + return nil, err + } + } + switch { + case p.options.asciiLower || (comparing && p.options.ignorecase): + for i, c := range b.src { + if 'A' <= c && c <= 'Z' { + b.src[i] = c ^ 1<<5 + } + } + case p.options.cases != nil: + b.apply(p.options.cases) + } + c := checker{p: p} + if _, err := c.span(b.src, true); err != nil { + return nil, err + } + if p.disallow != nil { + for _, c := range b.src { + if p.disallow.Contains(rune(c)) { + return nil, errDisallowedRune + } + } + } + if p.options.disallowEmpty && len(b.src) == 0 { + return nil, errEmptyString + } + return b.src, nil + } + + // These transforms are applied in the order defined in + // https://tools.ietf.org/html/rfc7564#section-7 + + // TODO: allow different width transforms options. + if p.options.foldWidth || (p.options.ignorecase && comparing) { + b.apply(foldWidthT) + } + for _, f := range p.options.additional { + if err = b.apply(f()); err != nil { + return nil, err + } + } + if p.options.cases != nil { + b.apply(p.options.cases) + } + if comparing && p.options.ignorecase { + b.apply(lowerCaseT) + } + b.apply(p.norm) + if p.options.bidiRule && !bidirule.Valid(b.src) { + return nil, bidirule.ErrInvalid + } + c := checker{p: p} + if _, err := c.span(b.src, true); err != nil { + return nil, err + } + if p.disallow != nil { + for i := 0; i < len(b.src); { + r, size := utf8.DecodeRune(b.src[i:]) + if p.disallow.Contains(r) { + return nil, errDisallowedRune + } + i += size + } + } + if p.options.disallowEmpty && len(b.src) == 0 { + return nil, errEmptyString + } + return b.src, nil +} + +// Append appends the result of applying p to src writing the result to dst. +// It returns an error if the input string is invalid. +func (p *Profile) Append(dst, src []byte) ([]byte, error) { + var buf buffers + b, err := buf.enforce(p, src, false) + if err != nil { + return nil, err + } + return append(dst, b...), nil +} + +func processBytes(p *Profile, b []byte, key bool) ([]byte, error) { + var buf buffers + b, err := buf.enforce(p, b, key) + if err != nil { + return nil, err + } + if buf.next == 0 { + c := make([]byte, len(b)) + copy(c, b) + return c, nil + } + return b, nil +} + +// Bytes returns a new byte slice with the result of applying the profile to b. +func (p *Profile) Bytes(b []byte) ([]byte, error) { + return processBytes(p, b, false) +} + +// AppendCompareKey appends the result of applying p to src (including any +// optional rules to make strings comparable or useful in a map key such as +// applying lowercasing) writing the result to dst. It returns an error if the +// input string is invalid. +func (p *Profile) AppendCompareKey(dst, src []byte) ([]byte, error) { + var buf buffers + b, err := buf.enforce(p, src, true) + if err != nil { + return nil, err + } + return append(dst, b...), nil +} + +func processString(p *Profile, s string, key bool) (string, error) { + var buf buffers + b, err := buf.enforce(p, []byte(s), key) + if err != nil { + return "", err + } + return string(b), nil +} + +// String returns a string with the result of applying the profile to s. +func (p *Profile) String(s string) (string, error) { + return processString(p, s, false) +} + +// CompareKey returns a string that can be used for comparison, hashing, or +// collation. 
+func (p *Profile) CompareKey(s string) (string, error) { + return processString(p, s, true) +} + +// Compare enforces both strings, and then compares them for bit-string identity +// (byte-for-byte equality). If either string cannot be enforced, the comparison +// is false. +func (p *Profile) Compare(a, b string) bool { + var buf buffers + + akey, err := buf.enforce(p, []byte(a), true) + if err != nil { + return false + } + + buf = buffers{} + bkey, err := buf.enforce(p, []byte(b), true) + if err != nil { + return false + } + + return bytes.Compare(akey, bkey) == 0 +} + +// Allowed returns a runes.Set containing every rune that is a member of the +// underlying profile's string class and not disallowed by any profile specific +// rules. +func (p *Profile) Allowed() runes.Set { + if p.options.disallow != nil { + return runes.Predicate(func(r rune) bool { + return p.class.Contains(r) && !p.options.disallow.Contains(r) + }) + } + return p.class +} + +type checker struct { + p *Profile + allowed runes.Set + + beforeBits catBitmap + termBits catBitmap + acceptBits catBitmap +} + +func (c *checker) Reset() { + c.beforeBits = 0 + c.termBits = 0 + c.acceptBits = 0 +} + +func (c *checker) span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + e, sz := dpTrie.lookup(src[n:]) + d := categoryTransitions[category(e&catMask)] + if sz == 0 { + if !atEOF { + return n, transform.ErrShortSrc + } + return n, errDisallowedRune + } + if property(e) < c.p.class.validFrom { + if d.rule == nil { + return n, errDisallowedRune + } + doLookAhead, err := d.rule(c.beforeBits) + if err != nil { + return n, err + } + if doLookAhead { + c.beforeBits &= d.keep + c.beforeBits |= d.set + // We may still have a lookahead rule which we will require to + // complete (by checking termBits == 0) before setting the new + // bits. + if c.termBits != 0 && (!c.checkLookahead() || c.termBits == 0) { + return n, err + } + c.termBits = d.term + c.acceptBits = d.accept + n += sz + continue + } + } + c.beforeBits &= d.keep + c.beforeBits |= d.set + if c.termBits != 0 && !c.checkLookahead() { + return n, errContext + } + n += sz + } + if m := c.beforeBits >> finalShift; c.beforeBits&m != m || c.termBits != 0 { + err = errContext + } + return n, err +} + +func (c *checker) checkLookahead() bool { + switch { + case c.beforeBits&c.termBits != 0: + c.termBits = 0 + c.acceptBits = 0 + case c.beforeBits&c.acceptBits != 0: + default: + return false + } + return true +} + +// TODO: we may get rid of this transform if transform.Chain understands +// something like a Spanner interface. +func (c checker) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + short := false + if len(dst) < len(src) { + src = src[:len(dst)] + atEOF = false + short = true + } + nSrc, err = c.span(src, atEOF) + nDst = copy(dst, src[:nSrc]) + if short && (err == transform.ErrShortSrc || err == nil) { + err = transform.ErrShortDst + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/secure/precis/profiles.go b/vendor/golang.org/x/text/secure/precis/profiles.go new file mode 100644 index 000000000..86010025c --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/profiles.go @@ -0,0 +1,78 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package precis + +import ( + "unicode" + + "golang.org/x/text/runes" + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +var ( + // Implements the Nickname profile specified in RFC 7700. + // The nickname profile is not idempotent and may need to be applied multiple + // times before being used for comparisons. + Nickname *Profile = nickname + + // Implements the UsernameCaseMapped profile specified in RFC 7613. + UsernameCaseMapped *Profile = usernameCaseMap + + // Implements the UsernameCasePreserved profile specified in RFC 7613. + UsernameCasePreserved *Profile = usernameNoCaseMap + + // Implements the OpaqueString profile defined in RFC 7613 for passwords and other secure labels. + OpaqueString *Profile = opaquestring +) + +var ( + nickname = &Profile{ + options: getOpts( + AdditionalMapping(func() transform.Transformer { + return &nickAdditionalMapping{} + }), + IgnoreCase, + Norm(norm.NFKC), + DisallowEmpty, + ), + class: freeform, + } + usernameCaseMap = &Profile{ + options: getOpts( + FoldWidth, + LowerCase(), + Norm(norm.NFC), + BidiRule, + ), + class: identifier, + } + usernameNoCaseMap = &Profile{ + options: getOpts( + FoldWidth, + Norm(norm.NFC), + BidiRule, + ), + class: identifier, + } + opaquestring = &Profile{ + options: getOpts( + AdditionalMapping(func() transform.Transformer { + return mapSpaces + }), + Norm(norm.NFC), + DisallowEmpty, + ), + class: freeform, + } +) + +// mapSpaces is a shared value of a runes.Map transformer. +var mapSpaces transform.Transformer = runes.Map(func(r rune) rune { + if unicode.Is(unicode.Zs, r) { + return ' ' + } + return r +}) diff --git a/vendor/golang.org/x/text/secure/precis/tables.go b/vendor/golang.org/x/text/secure/precis/tables.go new file mode 100644 index 000000000..2f550c1ef --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/tables.go @@ -0,0 +1,3788 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package precis + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *derivedPropertiesTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return derivedPropertiesValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = derivedPropertiesIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = derivedPropertiesIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = derivedPropertiesIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *derivedPropertiesTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return derivedPropertiesValues[c0] + } + i := derivedPropertiesIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *derivedPropertiesTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return derivedPropertiesValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = derivedPropertiesIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = derivedPropertiesIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = derivedPropertiesIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *derivedPropertiesTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return derivedPropertiesValues[c0] + } + i := derivedPropertiesIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// derivedPropertiesTrie. Total size: 25344 bytes (24.75 KiB). Checksum: c5b977d76d42d8a. +type derivedPropertiesTrie struct{} + +func newDerivedPropertiesTrie(i int) *derivedPropertiesTrie { + return &derivedPropertiesTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *derivedPropertiesTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(derivedPropertiesValues[n<<6+uint32(b)]) + } +} + +// derivedPropertiesValues: 324 blocks, 20736 entries, 20736 bytes +// The third block is the zero block. +var derivedPropertiesValues = [20736]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x0040, 0x01: 0x0040, 0x02: 0x0040, 0x03: 0x0040, 0x04: 0x0040, 0x05: 0x0040, + 0x06: 0x0040, 0x07: 0x0040, 0x08: 0x0040, 0x09: 0x0040, 0x0a: 0x0040, 0x0b: 0x0040, + 0x0c: 0x0040, 0x0d: 0x0040, 0x0e: 0x0040, 0x0f: 0x0040, 0x10: 0x0040, 0x11: 0x0040, + 0x12: 0x0040, 0x13: 0x0040, 0x14: 0x0040, 0x15: 0x0040, 0x16: 0x0040, 0x17: 0x0040, + 0x18: 0x0040, 0x19: 0x0040, 0x1a: 0x0040, 0x1b: 0x0040, 0x1c: 0x0040, 0x1d: 0x0040, + 0x1e: 0x0040, 0x1f: 0x0040, 0x20: 0x0080, 0x21: 0x00c0, 0x22: 0x00c0, 0x23: 0x00c0, + 0x24: 0x00c0, 0x25: 0x00c0, 0x26: 0x00c0, 0x27: 0x00c0, 0x28: 0x00c0, 0x29: 0x00c0, + 0x2a: 0x00c0, 0x2b: 0x00c0, 0x2c: 0x00c0, 0x2d: 0x00c0, 0x2e: 0x00c0, 0x2f: 0x00c0, + 0x30: 0x00c0, 0x31: 0x00c0, 0x32: 0x00c0, 0x33: 0x00c0, 0x34: 0x00c0, 0x35: 0x00c0, + 0x36: 0x00c0, 0x37: 0x00c0, 0x38: 0x00c0, 0x39: 0x00c0, 0x3a: 0x00c0, 0x3b: 0x00c0, + 0x3c: 0x00c0, 0x3d: 0x00c0, 0x3e: 0x00c0, 0x3f: 0x00c0, + // Block 0x1, offset 0x40 + 0x40: 0x00c0, 0x41: 0x00c0, 0x42: 0x00c0, 0x43: 0x00c0, 0x44: 0x00c0, 0x45: 0x00c0, + 0x46: 0x00c0, 0x47: 0x00c0, 0x48: 0x00c0, 0x49: 0x00c0, 0x4a: 0x00c0, 0x4b: 0x00c0, + 0x4c: 0x00c0, 0x4d: 0x00c0, 0x4e: 0x00c0, 0x4f: 0x00c0, 0x50: 0x00c0, 0x51: 0x00c0, + 0x52: 0x00c0, 0x53: 0x00c0, 0x54: 0x00c0, 0x55: 0x00c0, 0x56: 0x00c0, 0x57: 0x00c0, + 0x58: 0x00c0, 0x59: 0x00c0, 0x5a: 0x00c0, 0x5b: 0x00c0, 0x5c: 0x00c0, 0x5d: 0x00c0, + 0x5e: 0x00c0, 0x5f: 0x00c0, 0x60: 0x00c0, 0x61: 0x00c0, 0x62: 0x00c0, 0x63: 0x00c0, + 0x64: 0x00c0, 0x65: 0x00c0, 0x66: 0x00c0, 0x67: 0x00c0, 0x68: 0x00c0, 0x69: 0x00c0, + 0x6a: 0x00c0, 0x6b: 0x00c0, 0x6c: 0x00c7, 0x6d: 0x00c0, 0x6e: 0x00c0, 0x6f: 0x00c0, + 0x70: 0x00c0, 0x71: 0x00c0, 0x72: 0x00c0, 0x73: 0x00c0, 0x74: 0x00c0, 0x75: 0x00c0, + 0x76: 0x00c0, 0x77: 0x00c0, 0x78: 0x00c0, 0x79: 0x00c0, 0x7a: 0x00c0, 0x7b: 0x00c0, + 0x7c: 0x00c0, 0x7d: 0x00c0, 0x7e: 0x00c0, 0x7f: 0x0040, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 
0x0040, 0xdf: 0x0040, 0xe0: 0x0080, 0xe1: 0x0080, 0xe2: 0x0080, 0xe3: 0x0080, + 0xe4: 0x0080, 0xe5: 0x0080, 0xe6: 0x0080, 0xe7: 0x0080, 0xe8: 0x0080, 0xe9: 0x0080, + 0xea: 0x0080, 0xeb: 0x0080, 0xec: 0x0080, 0xed: 0x0040, 0xee: 0x0080, 0xef: 0x0080, + 0xf0: 0x0080, 0xf1: 0x0080, 0xf2: 0x0080, 0xf3: 0x0080, 0xf4: 0x0080, 0xf5: 0x0080, + 0xf6: 0x0080, 0xf7: 0x004f, 0xf8: 0x0080, 0xf9: 0x0080, 0xfa: 0x0080, 0xfb: 0x0080, + 0xfc: 0x0080, 0xfd: 0x0080, 0xfe: 0x0080, 0xff: 0x0080, + // Block 0x4, offset 0x100 + 0x100: 0x00c0, 0x101: 0x00c0, 0x102: 0x00c0, 0x103: 0x00c0, 0x104: 0x00c0, 0x105: 0x00c0, + 0x106: 0x00c0, 0x107: 0x00c0, 0x108: 0x00c0, 0x109: 0x00c0, 0x10a: 0x00c0, 0x10b: 0x00c0, + 0x10c: 0x00c0, 0x10d: 0x00c0, 0x10e: 0x00c0, 0x10f: 0x00c0, 0x110: 0x00c0, 0x111: 0x00c0, + 0x112: 0x00c0, 0x113: 0x00c0, 0x114: 0x00c0, 0x115: 0x00c0, 0x116: 0x00c0, 0x117: 0x0080, + 0x118: 0x00c0, 0x119: 0x00c0, 0x11a: 0x00c0, 0x11b: 0x00c0, 0x11c: 0x00c0, 0x11d: 0x00c0, + 0x11e: 0x00c0, 0x11f: 0x00c0, 0x120: 0x00c0, 0x121: 0x00c0, 0x122: 0x00c0, 0x123: 0x00c0, + 0x124: 0x00c0, 0x125: 0x00c0, 0x126: 0x00c0, 0x127: 0x00c0, 0x128: 0x00c0, 0x129: 0x00c0, + 0x12a: 0x00c0, 0x12b: 0x00c0, 0x12c: 0x00c0, 0x12d: 0x00c0, 0x12e: 0x00c0, 0x12f: 0x00c0, + 0x130: 0x00c0, 0x131: 0x00c0, 0x132: 0x00c0, 0x133: 0x00c0, 0x134: 0x00c0, 0x135: 0x00c0, + 0x136: 0x00c0, 0x137: 0x0080, 0x138: 0x00c0, 0x139: 0x00c0, 0x13a: 0x00c0, 0x13b: 0x00c0, + 0x13c: 0x00c0, 0x13d: 0x00c0, 0x13e: 0x00c0, 0x13f: 0x00c0, + // Block 0x5, offset 0x140 + 0x140: 0x00c0, 0x141: 0x00c0, 0x142: 0x00c0, 0x143: 0x00c0, 0x144: 0x00c0, 0x145: 0x00c0, + 0x146: 0x00c0, 0x147: 0x00c0, 0x148: 0x00c0, 0x149: 0x00c0, 0x14a: 0x00c0, 0x14b: 0x00c0, + 0x14c: 0x00c0, 0x14d: 0x00c0, 0x14e: 0x00c0, 0x14f: 0x00c0, 0x150: 0x00c0, 0x151: 0x00c0, + 0x152: 0x00c0, 0x153: 0x00c0, 0x154: 0x00c0, 0x155: 0x00c0, 0x156: 0x00c0, 0x157: 0x00c0, + 0x158: 0x00c0, 0x159: 0x00c0, 0x15a: 0x00c0, 0x15b: 0x00c0, 0x15c: 0x00c0, 0x15d: 0x00c0, + 0x15e: 0x00c0, 0x15f: 0x00c0, 0x160: 0x00c0, 0x161: 0x00c0, 0x162: 0x00c0, 0x163: 0x00c0, + 0x164: 0x00c0, 0x165: 0x00c0, 0x166: 0x00c0, 0x167: 0x00c0, 0x168: 0x00c0, 0x169: 0x00c0, + 0x16a: 0x00c0, 0x16b: 0x00c0, 0x16c: 0x00c0, 0x16d: 0x00c0, 0x16e: 0x00c0, 0x16f: 0x00c0, + 0x170: 0x00c0, 0x171: 0x00c0, 0x172: 0x0080, 0x173: 0x0080, 0x174: 0x00c0, 0x175: 0x00c0, + 0x176: 0x00c0, 0x177: 0x00c0, 0x178: 0x00c0, 0x179: 0x00c0, 0x17a: 0x00c0, 0x17b: 0x00c0, + 0x17c: 0x00c0, 0x17d: 0x00c0, 0x17e: 0x00c0, 0x17f: 0x0080, + // Block 0x6, offset 0x180 + 0x180: 0x0080, 0x181: 0x00c0, 0x182: 0x00c0, 0x183: 0x00c0, 0x184: 0x00c0, 0x185: 0x00c0, + 0x186: 0x00c0, 0x187: 0x00c0, 0x188: 0x00c0, 0x189: 0x0080, 0x18a: 0x00c0, 0x18b: 0x00c0, + 0x18c: 0x00c0, 0x18d: 0x00c0, 0x18e: 0x00c0, 0x18f: 0x00c0, 0x190: 0x00c0, 0x191: 0x00c0, + 0x192: 0x00c0, 0x193: 0x00c0, 0x194: 0x00c0, 0x195: 0x00c0, 0x196: 0x00c0, 0x197: 0x00c0, + 0x198: 0x00c0, 0x199: 0x00c0, 0x19a: 0x00c0, 0x19b: 0x00c0, 0x19c: 0x00c0, 0x19d: 0x00c0, + 0x19e: 0x00c0, 0x19f: 0x00c0, 0x1a0: 0x00c0, 0x1a1: 0x00c0, 0x1a2: 0x00c0, 0x1a3: 0x00c0, + 0x1a4: 0x00c0, 0x1a5: 0x00c0, 0x1a6: 0x00c0, 0x1a7: 0x00c0, 0x1a8: 0x00c0, 0x1a9: 0x00c0, + 0x1aa: 0x00c0, 0x1ab: 0x00c0, 0x1ac: 0x00c0, 0x1ad: 0x00c0, 0x1ae: 0x00c0, 0x1af: 0x00c0, + 0x1b0: 0x00c0, 0x1b1: 0x00c0, 0x1b2: 0x00c0, 0x1b3: 0x00c0, 0x1b4: 0x00c0, 0x1b5: 0x00c0, + 0x1b6: 0x00c0, 0x1b7: 0x00c0, 0x1b8: 0x00c0, 0x1b9: 0x00c0, 0x1ba: 0x00c0, 0x1bb: 0x00c0, + 0x1bc: 0x00c0, 0x1bd: 0x00c0, 0x1be: 0x00c0, 0x1bf: 0x0080, + // Block 0x7, offset 0x1c0 + 0x1c0: 
0x00c0, 0x1c1: 0x00c0, 0x1c2: 0x00c0, 0x1c3: 0x00c0, 0x1c4: 0x00c0, 0x1c5: 0x00c0, + 0x1c6: 0x00c0, 0x1c7: 0x00c0, 0x1c8: 0x00c0, 0x1c9: 0x00c0, 0x1ca: 0x00c0, 0x1cb: 0x00c0, + 0x1cc: 0x00c0, 0x1cd: 0x00c0, 0x1ce: 0x00c0, 0x1cf: 0x00c0, 0x1d0: 0x00c0, 0x1d1: 0x00c0, + 0x1d2: 0x00c0, 0x1d3: 0x00c0, 0x1d4: 0x00c0, 0x1d5: 0x00c0, 0x1d6: 0x00c0, 0x1d7: 0x00c0, + 0x1d8: 0x00c0, 0x1d9: 0x00c0, 0x1da: 0x00c0, 0x1db: 0x00c0, 0x1dc: 0x00c0, 0x1dd: 0x00c0, + 0x1de: 0x00c0, 0x1df: 0x00c0, 0x1e0: 0x00c0, 0x1e1: 0x00c0, 0x1e2: 0x00c0, 0x1e3: 0x00c0, + 0x1e4: 0x00c0, 0x1e5: 0x00c0, 0x1e6: 0x00c0, 0x1e7: 0x00c0, 0x1e8: 0x00c0, 0x1e9: 0x00c0, + 0x1ea: 0x00c0, 0x1eb: 0x00c0, 0x1ec: 0x00c0, 0x1ed: 0x00c0, 0x1ee: 0x00c0, 0x1ef: 0x00c0, + 0x1f0: 0x00c0, 0x1f1: 0x00c0, 0x1f2: 0x00c0, 0x1f3: 0x00c0, 0x1f4: 0x00c0, 0x1f5: 0x00c0, + 0x1f6: 0x00c0, 0x1f7: 0x00c0, 0x1f8: 0x00c0, 0x1f9: 0x00c0, 0x1fa: 0x00c0, 0x1fb: 0x00c0, + 0x1fc: 0x00c0, 0x1fd: 0x00c0, 0x1fe: 0x00c0, 0x1ff: 0x00c0, + // Block 0x8, offset 0x200 + 0x200: 0x00c0, 0x201: 0x00c0, 0x202: 0x00c0, 0x203: 0x00c0, 0x204: 0x0080, 0x205: 0x0080, + 0x206: 0x0080, 0x207: 0x0080, 0x208: 0x0080, 0x209: 0x0080, 0x20a: 0x0080, 0x20b: 0x0080, + 0x20c: 0x0080, 0x20d: 0x00c0, 0x20e: 0x00c0, 0x20f: 0x00c0, 0x210: 0x00c0, 0x211: 0x00c0, + 0x212: 0x00c0, 0x213: 0x00c0, 0x214: 0x00c0, 0x215: 0x00c0, 0x216: 0x00c0, 0x217: 0x00c0, + 0x218: 0x00c0, 0x219: 0x00c0, 0x21a: 0x00c0, 0x21b: 0x00c0, 0x21c: 0x00c0, 0x21d: 0x00c0, + 0x21e: 0x00c0, 0x21f: 0x00c0, 0x220: 0x00c0, 0x221: 0x00c0, 0x222: 0x00c0, 0x223: 0x00c0, + 0x224: 0x00c0, 0x225: 0x00c0, 0x226: 0x00c0, 0x227: 0x00c0, 0x228: 0x00c0, 0x229: 0x00c0, + 0x22a: 0x00c0, 0x22b: 0x00c0, 0x22c: 0x00c0, 0x22d: 0x00c0, 0x22e: 0x00c0, 0x22f: 0x00c0, + 0x230: 0x00c0, 0x231: 0x0080, 0x232: 0x0080, 0x233: 0x0080, 0x234: 0x00c0, 0x235: 0x00c0, + 0x236: 0x00c0, 0x237: 0x00c0, 0x238: 0x00c0, 0x239: 0x00c0, 0x23a: 0x00c0, 0x23b: 0x00c0, + 0x23c: 0x00c0, 0x23d: 0x00c0, 0x23e: 0x00c0, 0x23f: 0x00c0, + // Block 0x9, offset 0x240 + 0x240: 0x00c0, 0x241: 0x00c0, 0x242: 0x00c0, 0x243: 0x00c0, 0x244: 0x00c0, 0x245: 0x00c0, + 0x246: 0x00c0, 0x247: 0x00c0, 0x248: 0x00c0, 0x249: 0x00c0, 0x24a: 0x00c0, 0x24b: 0x00c0, + 0x24c: 0x00c0, 0x24d: 0x00c0, 0x24e: 0x00c0, 0x24f: 0x00c0, 0x250: 0x00c0, 0x251: 0x00c0, + 0x252: 0x00c0, 0x253: 0x00c0, 0x254: 0x00c0, 0x255: 0x00c0, 0x256: 0x00c0, 0x257: 0x00c0, + 0x258: 0x00c0, 0x259: 0x00c0, 0x25a: 0x00c0, 0x25b: 0x00c0, 0x25c: 0x00c0, 0x25d: 0x00c0, + 0x25e: 0x00c0, 0x25f: 0x00c0, 0x260: 0x00c0, 0x261: 0x00c0, 0x262: 0x00c0, 0x263: 0x00c0, + 0x264: 0x00c0, 0x265: 0x00c0, 0x266: 0x00c0, 0x267: 0x00c0, 0x268: 0x00c0, 0x269: 0x00c0, + 0x26a: 0x00c0, 0x26b: 0x00c0, 0x26c: 0x00c0, 0x26d: 0x00c0, 0x26e: 0x00c0, 0x26f: 0x00c0, + 0x270: 0x0080, 0x271: 0x0080, 0x272: 0x0080, 0x273: 0x0080, 0x274: 0x0080, 0x275: 0x0080, + 0x276: 0x0080, 0x277: 0x0080, 0x278: 0x0080, 0x279: 0x00c0, 0x27a: 0x00c0, 0x27b: 0x00c0, + 0x27c: 0x00c0, 0x27d: 0x00c0, 0x27e: 0x00c0, 0x27f: 0x00c0, + // Block 0xa, offset 0x280 + 0x280: 0x00c0, 0x281: 0x00c0, 0x282: 0x0080, 0x283: 0x0080, 0x284: 0x0080, 0x285: 0x0080, + 0x286: 0x00c0, 0x287: 0x00c0, 0x288: 0x00c0, 0x289: 0x00c0, 0x28a: 0x00c0, 0x28b: 0x00c0, + 0x28c: 0x00c0, 0x28d: 0x00c0, 0x28e: 0x00c0, 0x28f: 0x00c0, 0x290: 0x00c0, 0x291: 0x00c0, + 0x292: 0x0080, 0x293: 0x0080, 0x294: 0x0080, 0x295: 0x0080, 0x296: 0x0080, 0x297: 0x0080, + 0x298: 0x0080, 0x299: 0x0080, 0x29a: 0x0080, 0x29b: 0x0080, 0x29c: 0x0080, 0x29d: 0x0080, + 0x29e: 0x0080, 0x29f: 0x0080, 0x2a0: 0x0080, 0x2a1: 0x0080, 0x2a2: 
0x0080, 0x2a3: 0x0080, + 0x2a4: 0x0080, 0x2a5: 0x0080, 0x2a6: 0x0080, 0x2a7: 0x0080, 0x2a8: 0x0080, 0x2a9: 0x0080, + 0x2aa: 0x0080, 0x2ab: 0x0080, 0x2ac: 0x00c0, 0x2ad: 0x0080, 0x2ae: 0x00c0, 0x2af: 0x0080, + 0x2b0: 0x0080, 0x2b1: 0x0080, 0x2b2: 0x0080, 0x2b3: 0x0080, 0x2b4: 0x0080, 0x2b5: 0x0080, + 0x2b6: 0x0080, 0x2b7: 0x0080, 0x2b8: 0x0080, 0x2b9: 0x0080, 0x2ba: 0x0080, 0x2bb: 0x0080, + 0x2bc: 0x0080, 0x2bd: 0x0080, 0x2be: 0x0080, 0x2bf: 0x0080, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x00c3, 0x2c1: 0x00c3, 0x2c2: 0x00c3, 0x2c3: 0x00c3, 0x2c4: 0x00c3, 0x2c5: 0x00c3, + 0x2c6: 0x00c3, 0x2c7: 0x00c3, 0x2c8: 0x00c3, 0x2c9: 0x00c3, 0x2ca: 0x00c3, 0x2cb: 0x00c3, + 0x2cc: 0x00c3, 0x2cd: 0x00c3, 0x2ce: 0x00c3, 0x2cf: 0x00c3, 0x2d0: 0x00c3, 0x2d1: 0x00c3, + 0x2d2: 0x00c3, 0x2d3: 0x00c3, 0x2d4: 0x00c3, 0x2d5: 0x00c3, 0x2d6: 0x00c3, 0x2d7: 0x00c3, + 0x2d8: 0x00c3, 0x2d9: 0x00c3, 0x2da: 0x00c3, 0x2db: 0x00c3, 0x2dc: 0x00c3, 0x2dd: 0x00c3, + 0x2de: 0x00c3, 0x2df: 0x00c3, 0x2e0: 0x00c3, 0x2e1: 0x00c3, 0x2e2: 0x00c3, 0x2e3: 0x00c3, + 0x2e4: 0x00c3, 0x2e5: 0x00c3, 0x2e6: 0x00c3, 0x2e7: 0x00c3, 0x2e8: 0x00c3, 0x2e9: 0x00c3, + 0x2ea: 0x00c3, 0x2eb: 0x00c3, 0x2ec: 0x00c3, 0x2ed: 0x00c3, 0x2ee: 0x00c3, 0x2ef: 0x00c3, + 0x2f0: 0x00c3, 0x2f1: 0x00c3, 0x2f2: 0x00c3, 0x2f3: 0x00c3, 0x2f4: 0x00c3, 0x2f5: 0x00c3, + 0x2f6: 0x00c3, 0x2f7: 0x00c3, 0x2f8: 0x00c3, 0x2f9: 0x00c3, 0x2fa: 0x00c3, 0x2fb: 0x00c3, + 0x2fc: 0x00c3, 0x2fd: 0x00c3, 0x2fe: 0x00c3, 0x2ff: 0x00c3, + // Block 0xc, offset 0x300 + 0x300: 0x0083, 0x301: 0x0083, 0x302: 0x00c3, 0x303: 0x0083, 0x304: 0x0083, 0x305: 0x00c3, + 0x306: 0x00c3, 0x307: 0x00c3, 0x308: 0x00c3, 0x309: 0x00c3, 0x30a: 0x00c3, 0x30b: 0x00c3, + 0x30c: 0x00c3, 0x30d: 0x00c3, 0x30e: 0x00c3, 0x30f: 0x0040, 0x310: 0x00c3, 0x311: 0x00c3, + 0x312: 0x00c3, 0x313: 0x00c3, 0x314: 0x00c3, 0x315: 0x00c3, 0x316: 0x00c3, 0x317: 0x00c3, + 0x318: 0x00c3, 0x319: 0x00c3, 0x31a: 0x00c3, 0x31b: 0x00c3, 0x31c: 0x00c3, 0x31d: 0x00c3, + 0x31e: 0x00c3, 0x31f: 0x00c3, 0x320: 0x00c3, 0x321: 0x00c3, 0x322: 0x00c3, 0x323: 0x00c3, + 0x324: 0x00c3, 0x325: 0x00c3, 0x326: 0x00c3, 0x327: 0x00c3, 0x328: 0x00c3, 0x329: 0x00c3, + 0x32a: 0x00c3, 0x32b: 0x00c3, 0x32c: 0x00c3, 0x32d: 0x00c3, 0x32e: 0x00c3, 0x32f: 0x00c3, + 0x330: 0x00c8, 0x331: 0x00c8, 0x332: 0x00c8, 0x333: 0x00c8, 0x334: 0x0080, 0x335: 0x0050, + 0x336: 0x00c8, 0x337: 0x00c8, 0x33a: 0x0088, 0x33b: 0x00c8, + 0x33c: 0x00c8, 0x33d: 0x00c8, 0x33e: 0x0080, 0x33f: 0x00c8, + // Block 0xd, offset 0x340 + 0x344: 0x0088, 0x345: 0x0080, + 0x346: 0x00c8, 0x347: 0x0080, 0x348: 0x00c8, 0x349: 0x00c8, 0x34a: 0x00c8, + 0x34c: 0x00c8, 0x34e: 0x00c8, 0x34f: 0x00c8, 0x350: 0x00c8, 0x351: 0x00c8, + 0x352: 0x00c8, 0x353: 0x00c8, 0x354: 0x00c8, 0x355: 0x00c8, 0x356: 0x00c8, 0x357: 0x00c8, + 0x358: 0x00c8, 0x359: 0x00c8, 0x35a: 0x00c8, 0x35b: 0x00c8, 0x35c: 0x00c8, 0x35d: 0x00c8, + 0x35e: 0x00c8, 0x35f: 0x00c8, 0x360: 0x00c8, 0x361: 0x00c8, 0x363: 0x00c8, + 0x364: 0x00c8, 0x365: 0x00c8, 0x366: 0x00c8, 0x367: 0x00c8, 0x368: 0x00c8, 0x369: 0x00c8, + 0x36a: 0x00c8, 0x36b: 0x00c8, 0x36c: 0x00c8, 0x36d: 0x00c8, 0x36e: 0x00c8, 0x36f: 0x00c8, + 0x370: 0x00c8, 0x371: 0x00c8, 0x372: 0x00c8, 0x373: 0x00c8, 0x374: 0x00c8, 0x375: 0x00c8, + 0x376: 0x00c8, 0x377: 0x00c8, 0x378: 0x00c8, 0x379: 0x00c8, 0x37a: 0x00c8, 0x37b: 0x00c8, + 0x37c: 0x00c8, 0x37d: 0x00c8, 0x37e: 0x00c8, 0x37f: 0x00c8, + // Block 0xe, offset 0x380 + 0x380: 0x00c8, 0x381: 0x00c8, 0x382: 0x00c8, 0x383: 0x00c8, 0x384: 0x00c8, 0x385: 0x00c8, + 0x386: 0x00c8, 0x387: 0x00c8, 0x388: 0x00c8, 0x389: 0x00c8, 0x38a: 0x00c8, 
0x38b: 0x00c8, + 0x38c: 0x00c8, 0x38d: 0x00c8, 0x38e: 0x00c8, 0x38f: 0x00c8, 0x390: 0x0088, 0x391: 0x0088, + 0x392: 0x0088, 0x393: 0x0088, 0x394: 0x0088, 0x395: 0x0088, 0x396: 0x0088, 0x397: 0x00c8, + 0x398: 0x00c8, 0x399: 0x00c8, 0x39a: 0x00c8, 0x39b: 0x00c8, 0x39c: 0x00c8, 0x39d: 0x00c8, + 0x39e: 0x00c8, 0x39f: 0x00c8, 0x3a0: 0x00c8, 0x3a1: 0x00c8, 0x3a2: 0x00c0, 0x3a3: 0x00c0, + 0x3a4: 0x00c0, 0x3a5: 0x00c0, 0x3a6: 0x00c0, 0x3a7: 0x00c0, 0x3a8: 0x00c0, 0x3a9: 0x00c0, + 0x3aa: 0x00c0, 0x3ab: 0x00c0, 0x3ac: 0x00c0, 0x3ad: 0x00c0, 0x3ae: 0x00c0, 0x3af: 0x00c0, + 0x3b0: 0x0088, 0x3b1: 0x0088, 0x3b2: 0x0088, 0x3b3: 0x00c8, 0x3b4: 0x0088, 0x3b5: 0x0088, + 0x3b6: 0x0088, 0x3b7: 0x00c8, 0x3b8: 0x00c8, 0x3b9: 0x0088, 0x3ba: 0x00c8, 0x3bb: 0x00c8, + 0x3bc: 0x00c8, 0x3bd: 0x00c8, 0x3be: 0x00c8, 0x3bf: 0x00c8, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x00c0, 0x3c1: 0x00c0, 0x3c2: 0x0080, 0x3c3: 0x00c3, 0x3c4: 0x00c3, 0x3c5: 0x00c3, + 0x3c6: 0x00c3, 0x3c7: 0x00c3, 0x3c8: 0x0083, 0x3c9: 0x0083, 0x3ca: 0x00c0, 0x3cb: 0x00c0, + 0x3cc: 0x00c0, 0x3cd: 0x00c0, 0x3ce: 0x00c0, 0x3cf: 0x00c0, 0x3d0: 0x00c0, 0x3d1: 0x00c0, + 0x3d2: 0x00c0, 0x3d3: 0x00c0, 0x3d4: 0x00c0, 0x3d5: 0x00c0, 0x3d6: 0x00c0, 0x3d7: 0x00c0, + 0x3d8: 0x00c0, 0x3d9: 0x00c0, 0x3da: 0x00c0, 0x3db: 0x00c0, 0x3dc: 0x00c0, 0x3dd: 0x00c0, + 0x3de: 0x00c0, 0x3df: 0x00c0, 0x3e0: 0x00c0, 0x3e1: 0x00c0, 0x3e2: 0x00c0, 0x3e3: 0x00c0, + 0x3e4: 0x00c0, 0x3e5: 0x00c0, 0x3e6: 0x00c0, 0x3e7: 0x00c0, 0x3e8: 0x00c0, 0x3e9: 0x00c0, + 0x3ea: 0x00c0, 0x3eb: 0x00c0, 0x3ec: 0x00c0, 0x3ed: 0x00c0, 0x3ee: 0x00c0, 0x3ef: 0x00c0, + 0x3f0: 0x00c0, 0x3f1: 0x00c0, 0x3f2: 0x00c0, 0x3f3: 0x00c0, 0x3f4: 0x00c0, 0x3f5: 0x00c0, + 0x3f6: 0x00c0, 0x3f7: 0x00c0, 0x3f8: 0x00c0, 0x3f9: 0x00c0, 0x3fa: 0x00c0, 0x3fb: 0x00c0, + 0x3fc: 0x00c0, 0x3fd: 0x00c0, 0x3fe: 0x00c0, 0x3ff: 0x00c0, + // Block 0x10, offset 0x400 + 0x400: 0x00c0, 0x401: 0x00c0, 0x402: 0x00c0, 0x403: 0x00c0, 0x404: 0x00c0, 0x405: 0x00c0, + 0x406: 0x00c0, 0x407: 0x00c0, 0x408: 0x00c0, 0x409: 0x00c0, 0x40a: 0x00c0, 0x40b: 0x00c0, + 0x40c: 0x00c0, 0x40d: 0x00c0, 0x40e: 0x00c0, 0x40f: 0x00c0, 0x410: 0x00c0, 0x411: 0x00c0, + 0x412: 0x00c0, 0x413: 0x00c0, 0x414: 0x00c0, 0x415: 0x00c0, 0x416: 0x00c0, 0x417: 0x00c0, + 0x418: 0x00c0, 0x419: 0x00c0, 0x41a: 0x00c0, 0x41b: 0x00c0, 0x41c: 0x00c0, 0x41d: 0x00c0, + 0x41e: 0x00c0, 0x41f: 0x00c0, 0x420: 0x00c0, 0x421: 0x00c0, 0x422: 0x00c0, 0x423: 0x00c0, + 0x424: 0x00c0, 0x425: 0x00c0, 0x426: 0x00c0, 0x427: 0x00c0, 0x428: 0x00c0, 0x429: 0x00c0, + 0x42a: 0x00c0, 0x42b: 0x00c0, 0x42c: 0x00c0, 0x42d: 0x00c0, 0x42e: 0x00c0, 0x42f: 0x00c0, + 0x431: 0x00c0, 0x432: 0x00c0, 0x433: 0x00c0, 0x434: 0x00c0, 0x435: 0x00c0, + 0x436: 0x00c0, 0x437: 0x00c0, 0x438: 0x00c0, 0x439: 0x00c0, 0x43a: 0x00c0, 0x43b: 0x00c0, + 0x43c: 0x00c0, 0x43d: 0x00c0, 0x43e: 0x00c0, 0x43f: 0x00c0, + // Block 0x11, offset 0x440 + 0x440: 0x00c0, 0x441: 0x00c0, 0x442: 0x00c0, 0x443: 0x00c0, 0x444: 0x00c0, 0x445: 0x00c0, + 0x446: 0x00c0, 0x447: 0x00c0, 0x448: 0x00c0, 0x449: 0x00c0, 0x44a: 0x00c0, 0x44b: 0x00c0, + 0x44c: 0x00c0, 0x44d: 0x00c0, 0x44e: 0x00c0, 0x44f: 0x00c0, 0x450: 0x00c0, 0x451: 0x00c0, + 0x452: 0x00c0, 0x453: 0x00c0, 0x454: 0x00c0, 0x455: 0x00c0, 0x456: 0x00c0, + 0x459: 0x00c0, 0x45a: 0x0080, 0x45b: 0x0080, 0x45c: 0x0080, 0x45d: 0x0080, + 0x45e: 0x0080, 0x45f: 0x0080, 0x461: 0x00c0, 0x462: 0x00c0, 0x463: 0x00c0, + 0x464: 0x00c0, 0x465: 0x00c0, 0x466: 0x00c0, 0x467: 0x00c0, 0x468: 0x00c0, 0x469: 0x00c0, + 0x46a: 0x00c0, 0x46b: 0x00c0, 0x46c: 0x00c0, 0x46d: 0x00c0, 0x46e: 0x00c0, 0x46f: 0x00c0, + 0x470: 
0x00c0, 0x471: 0x00c0, 0x472: 0x00c0, 0x473: 0x00c0, 0x474: 0x00c0, 0x475: 0x00c0, + 0x476: 0x00c0, 0x477: 0x00c0, 0x478: 0x00c0, 0x479: 0x00c0, 0x47a: 0x00c0, 0x47b: 0x00c0, + 0x47c: 0x00c0, 0x47d: 0x00c0, 0x47e: 0x00c0, 0x47f: 0x00c0, + // Block 0x12, offset 0x480 + 0x480: 0x00c0, 0x481: 0x00c0, 0x482: 0x00c0, 0x483: 0x00c0, 0x484: 0x00c0, 0x485: 0x00c0, + 0x486: 0x00c0, 0x487: 0x0080, 0x489: 0x0080, 0x48a: 0x0080, + 0x48d: 0x0080, 0x48e: 0x0080, 0x48f: 0x0080, 0x491: 0x00cb, + 0x492: 0x00cb, 0x493: 0x00cb, 0x494: 0x00cb, 0x495: 0x00cb, 0x496: 0x00cb, 0x497: 0x00cb, + 0x498: 0x00cb, 0x499: 0x00cb, 0x49a: 0x00cb, 0x49b: 0x00cb, 0x49c: 0x00cb, 0x49d: 0x00cb, + 0x49e: 0x00cb, 0x49f: 0x00cb, 0x4a0: 0x00cb, 0x4a1: 0x00cb, 0x4a2: 0x00cb, 0x4a3: 0x00cb, + 0x4a4: 0x00cb, 0x4a5: 0x00cb, 0x4a6: 0x00cb, 0x4a7: 0x00cb, 0x4a8: 0x00cb, 0x4a9: 0x00cb, + 0x4aa: 0x00cb, 0x4ab: 0x00cb, 0x4ac: 0x00cb, 0x4ad: 0x00cb, 0x4ae: 0x00cb, 0x4af: 0x00cb, + 0x4b0: 0x00cb, 0x4b1: 0x00cb, 0x4b2: 0x00cb, 0x4b3: 0x00cb, 0x4b4: 0x00cb, 0x4b5: 0x00cb, + 0x4b6: 0x00cb, 0x4b7: 0x00cb, 0x4b8: 0x00cb, 0x4b9: 0x00cb, 0x4ba: 0x00cb, 0x4bb: 0x00cb, + 0x4bc: 0x00cb, 0x4bd: 0x00cb, 0x4be: 0x008a, 0x4bf: 0x00cb, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x008a, 0x4c1: 0x00cb, 0x4c2: 0x00cb, 0x4c3: 0x008a, 0x4c4: 0x00cb, 0x4c5: 0x00cb, + 0x4c6: 0x008a, 0x4c7: 0x00cb, + 0x4d0: 0x00ca, 0x4d1: 0x00ca, + 0x4d2: 0x00ca, 0x4d3: 0x00ca, 0x4d4: 0x00ca, 0x4d5: 0x00ca, 0x4d6: 0x00ca, 0x4d7: 0x00ca, + 0x4d8: 0x00ca, 0x4d9: 0x00ca, 0x4da: 0x00ca, 0x4db: 0x00ca, 0x4dc: 0x00ca, 0x4dd: 0x00ca, + 0x4de: 0x00ca, 0x4df: 0x00ca, 0x4e0: 0x00ca, 0x4e1: 0x00ca, 0x4e2: 0x00ca, 0x4e3: 0x00ca, + 0x4e4: 0x00ca, 0x4e5: 0x00ca, 0x4e6: 0x00ca, 0x4e7: 0x00ca, 0x4e8: 0x00ca, 0x4e9: 0x00ca, + 0x4ea: 0x00ca, + 0x4f0: 0x00ca, 0x4f1: 0x00ca, 0x4f2: 0x00ca, 0x4f3: 0x0051, 0x4f4: 0x0051, + // Block 0x14, offset 0x500 + 0x500: 0x0040, 0x501: 0x0040, 0x502: 0x0040, 0x503: 0x0040, 0x504: 0x0040, 0x505: 0x0040, + 0x506: 0x0080, 0x507: 0x0080, 0x508: 0x0080, 0x509: 0x0080, 0x50a: 0x0080, 0x50b: 0x0080, + 0x50c: 0x0080, 0x50d: 0x0080, 0x50e: 0x0080, 0x50f: 0x0080, 0x510: 0x00c3, 0x511: 0x00c3, + 0x512: 0x00c3, 0x513: 0x00c3, 0x514: 0x00c3, 0x515: 0x00c3, 0x516: 0x00c3, 0x517: 0x00c3, + 0x518: 0x00c3, 0x519: 0x00c3, 0x51a: 0x00c3, 0x51b: 0x0080, 0x51c: 0x0040, + 0x51e: 0x0080, 0x51f: 0x0080, 0x520: 0x00c2, 0x521: 0x00c0, 0x522: 0x00c4, 0x523: 0x00c4, + 0x524: 0x00c4, 0x525: 0x00c4, 0x526: 0x00c2, 0x527: 0x00c4, 0x528: 0x00c2, 0x529: 0x00c4, + 0x52a: 0x00c2, 0x52b: 0x00c2, 0x52c: 0x00c2, 0x52d: 0x00c2, 0x52e: 0x00c2, 0x52f: 0x00c4, + 0x530: 0x00c4, 0x531: 0x00c4, 0x532: 0x00c4, 0x533: 0x00c2, 0x534: 0x00c2, 0x535: 0x00c2, + 0x536: 0x00c2, 0x537: 0x00c2, 0x538: 0x00c2, 0x539: 0x00c2, 0x53a: 0x00c2, 0x53b: 0x00c2, + 0x53c: 0x00c2, 0x53d: 0x00c2, 0x53e: 0x00c2, 0x53f: 0x00c2, + // Block 0x15, offset 0x540 + 0x540: 0x0040, 0x541: 0x00c2, 0x542: 0x00c2, 0x543: 0x00c2, 0x544: 0x00c2, 0x545: 0x00c2, + 0x546: 0x00c2, 0x547: 0x00c2, 0x548: 0x00c4, 0x549: 0x00c2, 0x54a: 0x00c2, 0x54b: 0x00c3, + 0x54c: 0x00c3, 0x54d: 0x00c3, 0x54e: 0x00c3, 0x54f: 0x00c3, 0x550: 0x00c3, 0x551: 0x00c3, + 0x552: 0x00c3, 0x553: 0x00c3, 0x554: 0x00c3, 0x555: 0x00c3, 0x556: 0x00c3, 0x557: 0x00c3, + 0x558: 0x00c3, 0x559: 0x00c3, 0x55a: 0x00c3, 0x55b: 0x00c3, 0x55c: 0x00c3, 0x55d: 0x00c3, + 0x55e: 0x00c3, 0x55f: 0x00c3, 0x560: 0x0053, 0x561: 0x0053, 0x562: 0x0053, 0x563: 0x0053, + 0x564: 0x0053, 0x565: 0x0053, 0x566: 0x0053, 0x567: 0x0053, 0x568: 0x0053, 0x569: 0x0053, + 0x56a: 0x0080, 0x56b: 0x0080, 0x56c: 0x0080, 
0x56d: 0x0080, 0x56e: 0x00c2, 0x56f: 0x00c2, + 0x570: 0x00c3, 0x571: 0x00c4, 0x572: 0x00c4, 0x573: 0x00c4, 0x574: 0x00c0, 0x575: 0x0084, + 0x576: 0x0084, 0x577: 0x0084, 0x578: 0x0082, 0x579: 0x00c2, 0x57a: 0x00c2, 0x57b: 0x00c2, + 0x57c: 0x00c2, 0x57d: 0x00c2, 0x57e: 0x00c2, 0x57f: 0x00c2, + // Block 0x16, offset 0x580 + 0x580: 0x00c2, 0x581: 0x00c2, 0x582: 0x00c2, 0x583: 0x00c2, 0x584: 0x00c2, 0x585: 0x00c2, + 0x586: 0x00c2, 0x587: 0x00c2, 0x588: 0x00c4, 0x589: 0x00c4, 0x58a: 0x00c4, 0x58b: 0x00c4, + 0x58c: 0x00c4, 0x58d: 0x00c4, 0x58e: 0x00c4, 0x58f: 0x00c4, 0x590: 0x00c4, 0x591: 0x00c4, + 0x592: 0x00c4, 0x593: 0x00c4, 0x594: 0x00c4, 0x595: 0x00c4, 0x596: 0x00c4, 0x597: 0x00c4, + 0x598: 0x00c4, 0x599: 0x00c4, 0x59a: 0x00c2, 0x59b: 0x00c2, 0x59c: 0x00c2, 0x59d: 0x00c2, + 0x59e: 0x00c2, 0x59f: 0x00c2, 0x5a0: 0x00c2, 0x5a1: 0x00c2, 0x5a2: 0x00c2, 0x5a3: 0x00c2, + 0x5a4: 0x00c2, 0x5a5: 0x00c2, 0x5a6: 0x00c2, 0x5a7: 0x00c2, 0x5a8: 0x00c2, 0x5a9: 0x00c2, + 0x5aa: 0x00c2, 0x5ab: 0x00c2, 0x5ac: 0x00c2, 0x5ad: 0x00c2, 0x5ae: 0x00c2, 0x5af: 0x00c2, + 0x5b0: 0x00c2, 0x5b1: 0x00c2, 0x5b2: 0x00c2, 0x5b3: 0x00c2, 0x5b4: 0x00c2, 0x5b5: 0x00c2, + 0x5b6: 0x00c2, 0x5b7: 0x00c2, 0x5b8: 0x00c2, 0x5b9: 0x00c2, 0x5ba: 0x00c2, 0x5bb: 0x00c2, + 0x5bc: 0x00c2, 0x5bd: 0x00c2, 0x5be: 0x00c2, 0x5bf: 0x00c2, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x00c4, 0x5c1: 0x00c2, 0x5c2: 0x00c2, 0x5c3: 0x00c4, 0x5c4: 0x00c4, 0x5c5: 0x00c4, + 0x5c6: 0x00c4, 0x5c7: 0x00c4, 0x5c8: 0x00c4, 0x5c9: 0x00c4, 0x5ca: 0x00c4, 0x5cb: 0x00c4, + 0x5cc: 0x00c2, 0x5cd: 0x00c4, 0x5ce: 0x00c2, 0x5cf: 0x00c4, 0x5d0: 0x00c2, 0x5d1: 0x00c2, + 0x5d2: 0x00c4, 0x5d3: 0x00c4, 0x5d4: 0x0080, 0x5d5: 0x00c4, 0x5d6: 0x00c3, 0x5d7: 0x00c3, + 0x5d8: 0x00c3, 0x5d9: 0x00c3, 0x5da: 0x00c3, 0x5db: 0x00c3, 0x5dc: 0x00c3, 0x5dd: 0x0040, + 0x5de: 0x0080, 0x5df: 0x00c3, 0x5e0: 0x00c3, 0x5e1: 0x00c3, 0x5e2: 0x00c3, 0x5e3: 0x00c3, + 0x5e4: 0x00c3, 0x5e5: 0x00c0, 0x5e6: 0x00c0, 0x5e7: 0x00c3, 0x5e8: 0x00c3, 0x5e9: 0x0080, + 0x5ea: 0x00c3, 0x5eb: 0x00c3, 0x5ec: 0x00c3, 0x5ed: 0x00c3, 0x5ee: 0x00c4, 0x5ef: 0x00c4, + 0x5f0: 0x0054, 0x5f1: 0x0054, 0x5f2: 0x0054, 0x5f3: 0x0054, 0x5f4: 0x0054, 0x5f5: 0x0054, + 0x5f6: 0x0054, 0x5f7: 0x0054, 0x5f8: 0x0054, 0x5f9: 0x0054, 0x5fa: 0x00c2, 0x5fb: 0x00c2, + 0x5fc: 0x00c2, 0x5fd: 0x00c0, 0x5fe: 0x00c0, 0x5ff: 0x00c2, + // Block 0x18, offset 0x600 + 0x600: 0x0080, 0x601: 0x0080, 0x602: 0x0080, 0x603: 0x0080, 0x604: 0x0080, 0x605: 0x0080, + 0x606: 0x0080, 0x607: 0x0080, 0x608: 0x0080, 0x609: 0x0080, 0x60a: 0x0080, 0x60b: 0x0080, + 0x60c: 0x0080, 0x60d: 0x0080, 0x60f: 0x0040, 0x610: 0x00c4, 0x611: 0x00c3, + 0x612: 0x00c2, 0x613: 0x00c2, 0x614: 0x00c2, 0x615: 0x00c4, 0x616: 0x00c4, 0x617: 0x00c4, + 0x618: 0x00c4, 0x619: 0x00c4, 0x61a: 0x00c2, 0x61b: 0x00c2, 0x61c: 0x00c2, 0x61d: 0x00c2, + 0x61e: 0x00c4, 0x61f: 0x00c2, 0x620: 0x00c2, 0x621: 0x00c2, 0x622: 0x00c2, 0x623: 0x00c2, + 0x624: 0x00c2, 0x625: 0x00c2, 0x626: 0x00c2, 0x627: 0x00c2, 0x628: 0x00c4, 0x629: 0x00c2, + 0x62a: 0x00c4, 0x62b: 0x00c2, 0x62c: 0x00c4, 0x62d: 0x00c2, 0x62e: 0x00c2, 0x62f: 0x00c4, + 0x630: 0x00c3, 0x631: 0x00c3, 0x632: 0x00c3, 0x633: 0x00c3, 0x634: 0x00c3, 0x635: 0x00c3, + 0x636: 0x00c3, 0x637: 0x00c3, 0x638: 0x00c3, 0x639: 0x00c3, 0x63a: 0x00c3, 0x63b: 0x00c3, + 0x63c: 0x00c3, 0x63d: 0x00c3, 0x63e: 0x00c3, 0x63f: 0x00c3, + // Block 0x19, offset 0x640 + 0x640: 0x00c3, 0x641: 0x00c3, 0x642: 0x00c3, 0x643: 0x00c3, 0x644: 0x00c3, 0x645: 0x00c3, + 0x646: 0x00c3, 0x647: 0x00c3, 0x648: 0x00c3, 0x649: 0x00c3, 0x64a: 0x00c3, + 0x64d: 0x00c4, 0x64e: 0x00c2, 0x64f: 
0x00c2, 0x650: 0x00c2, 0x651: 0x00c2, + 0x652: 0x00c2, 0x653: 0x00c2, 0x654: 0x00c2, 0x655: 0x00c2, 0x656: 0x00c2, 0x657: 0x00c2, + 0x658: 0x00c2, 0x659: 0x00c4, 0x65a: 0x00c4, 0x65b: 0x00c4, 0x65c: 0x00c2, 0x65d: 0x00c2, + 0x65e: 0x00c2, 0x65f: 0x00c2, 0x660: 0x00c2, 0x661: 0x00c2, 0x662: 0x00c2, 0x663: 0x00c2, + 0x664: 0x00c2, 0x665: 0x00c2, 0x666: 0x00c2, 0x667: 0x00c2, 0x668: 0x00c2, 0x669: 0x00c2, + 0x66a: 0x00c2, 0x66b: 0x00c4, 0x66c: 0x00c4, 0x66d: 0x00c2, 0x66e: 0x00c2, 0x66f: 0x00c2, + 0x670: 0x00c2, 0x671: 0x00c4, 0x672: 0x00c2, 0x673: 0x00c4, 0x674: 0x00c4, 0x675: 0x00c2, + 0x676: 0x00c2, 0x677: 0x00c2, 0x678: 0x00c4, 0x679: 0x00c4, 0x67a: 0x00c2, 0x67b: 0x00c2, + 0x67c: 0x00c2, 0x67d: 0x00c2, 0x67e: 0x00c2, 0x67f: 0x00c2, + // Block 0x1a, offset 0x680 + 0x680: 0x00c0, 0x681: 0x00c0, 0x682: 0x00c0, 0x683: 0x00c0, 0x684: 0x00c0, 0x685: 0x00c0, + 0x686: 0x00c0, 0x687: 0x00c0, 0x688: 0x00c0, 0x689: 0x00c0, 0x68a: 0x00c0, 0x68b: 0x00c0, + 0x68c: 0x00c0, 0x68d: 0x00c0, 0x68e: 0x00c0, 0x68f: 0x00c0, 0x690: 0x00c0, 0x691: 0x00c0, + 0x692: 0x00c0, 0x693: 0x00c0, 0x694: 0x00c0, 0x695: 0x00c0, 0x696: 0x00c0, 0x697: 0x00c0, + 0x698: 0x00c0, 0x699: 0x00c0, 0x69a: 0x00c0, 0x69b: 0x00c0, 0x69c: 0x00c0, 0x69d: 0x00c0, + 0x69e: 0x00c0, 0x69f: 0x00c0, 0x6a0: 0x00c0, 0x6a1: 0x00c0, 0x6a2: 0x00c0, 0x6a3: 0x00c0, + 0x6a4: 0x00c0, 0x6a5: 0x00c0, 0x6a6: 0x00c3, 0x6a7: 0x00c3, 0x6a8: 0x00c3, 0x6a9: 0x00c3, + 0x6aa: 0x00c3, 0x6ab: 0x00c3, 0x6ac: 0x00c3, 0x6ad: 0x00c3, 0x6ae: 0x00c3, 0x6af: 0x00c3, + 0x6b0: 0x00c3, 0x6b1: 0x00c0, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x00c0, 0x6c1: 0x00c0, 0x6c2: 0x00c0, 0x6c3: 0x00c0, 0x6c4: 0x00c0, 0x6c5: 0x00c0, + 0x6c6: 0x00c0, 0x6c7: 0x00c0, 0x6c8: 0x00c0, 0x6c9: 0x00c0, 0x6ca: 0x00c2, 0x6cb: 0x00c2, + 0x6cc: 0x00c2, 0x6cd: 0x00c2, 0x6ce: 0x00c2, 0x6cf: 0x00c2, 0x6d0: 0x00c2, 0x6d1: 0x00c2, + 0x6d2: 0x00c2, 0x6d3: 0x00c2, 0x6d4: 0x00c2, 0x6d5: 0x00c2, 0x6d6: 0x00c2, 0x6d7: 0x00c2, + 0x6d8: 0x00c2, 0x6d9: 0x00c2, 0x6da: 0x00c2, 0x6db: 0x00c2, 0x6dc: 0x00c2, 0x6dd: 0x00c2, + 0x6de: 0x00c2, 0x6df: 0x00c2, 0x6e0: 0x00c2, 0x6e1: 0x00c2, 0x6e2: 0x00c2, 0x6e3: 0x00c2, + 0x6e4: 0x00c2, 0x6e5: 0x00c2, 0x6e6: 0x00c2, 0x6e7: 0x00c2, 0x6e8: 0x00c2, 0x6e9: 0x00c2, + 0x6ea: 0x00c2, 0x6eb: 0x00c3, 0x6ec: 0x00c3, 0x6ed: 0x00c3, 0x6ee: 0x00c3, 0x6ef: 0x00c3, + 0x6f0: 0x00c3, 0x6f1: 0x00c3, 0x6f2: 0x00c3, 0x6f3: 0x00c3, 0x6f4: 0x00c0, 0x6f5: 0x00c0, + 0x6f6: 0x0080, 0x6f7: 0x0080, 0x6f8: 0x0080, 0x6f9: 0x0080, 0x6fa: 0x0040, + // Block 0x1c, offset 0x700 + 0x700: 0x00c0, 0x701: 0x00c0, 0x702: 0x00c0, 0x703: 0x00c0, 0x704: 0x00c0, 0x705: 0x00c0, + 0x706: 0x00c0, 0x707: 0x00c0, 0x708: 0x00c0, 0x709: 0x00c0, 0x70a: 0x00c0, 0x70b: 0x00c0, + 0x70c: 0x00c0, 0x70d: 0x00c0, 0x70e: 0x00c0, 0x70f: 0x00c0, 0x710: 0x00c0, 0x711: 0x00c0, + 0x712: 0x00c0, 0x713: 0x00c0, 0x714: 0x00c0, 0x715: 0x00c0, 0x716: 0x00c3, 0x717: 0x00c3, + 0x718: 0x00c3, 0x719: 0x00c3, 0x71a: 0x00c0, 0x71b: 0x00c3, 0x71c: 0x00c3, 0x71d: 0x00c3, + 0x71e: 0x00c3, 0x71f: 0x00c3, 0x720: 0x00c3, 0x721: 0x00c3, 0x722: 0x00c3, 0x723: 0x00c3, + 0x724: 0x00c0, 0x725: 0x00c3, 0x726: 0x00c3, 0x727: 0x00c3, 0x728: 0x00c0, 0x729: 0x00c3, + 0x72a: 0x00c3, 0x72b: 0x00c3, 0x72c: 0x00c3, 0x72d: 0x00c3, + 0x730: 0x0080, 0x731: 0x0080, 0x732: 0x0080, 0x733: 0x0080, 0x734: 0x0080, 0x735: 0x0080, + 0x736: 0x0080, 0x737: 0x0080, 0x738: 0x0080, 0x739: 0x0080, 0x73a: 0x0080, 0x73b: 0x0080, + 0x73c: 0x0080, 0x73d: 0x0080, 0x73e: 0x0080, + // Block 0x1d, offset 0x740 + 0x740: 0x00c4, 0x741: 0x00c2, 0x742: 0x00c2, 0x743: 0x00c2, 0x744: 0x00c2, 
0x745: 0x00c2, + 0x746: 0x00c4, 0x747: 0x00c4, 0x748: 0x00c2, 0x749: 0x00c4, 0x74a: 0x00c2, 0x74b: 0x00c2, + 0x74c: 0x00c2, 0x74d: 0x00c2, 0x74e: 0x00c2, 0x74f: 0x00c2, 0x750: 0x00c2, 0x751: 0x00c2, + 0x752: 0x00c2, 0x753: 0x00c2, 0x754: 0x00c4, 0x755: 0x00c2, 0x756: 0x00c0, 0x757: 0x00c0, + 0x758: 0x00c0, 0x759: 0x00c3, 0x75a: 0x00c3, 0x75b: 0x00c3, + 0x75e: 0x0080, + // Block 0x1e, offset 0x780 + 0x7a0: 0x00c2, 0x7a1: 0x00c2, 0x7a2: 0x00c2, 0x7a3: 0x00c2, + 0x7a4: 0x00c2, 0x7a5: 0x00c2, 0x7a6: 0x00c2, 0x7a7: 0x00c2, 0x7a8: 0x00c2, 0x7a9: 0x00c2, + 0x7aa: 0x00c4, 0x7ab: 0x00c4, 0x7ac: 0x00c4, 0x7ad: 0x00c0, 0x7ae: 0x00c4, 0x7af: 0x00c2, + 0x7b0: 0x00c2, 0x7b1: 0x00c4, 0x7b2: 0x00c4, 0x7b3: 0x00c2, 0x7b4: 0x00c2, + 0x7b6: 0x00c2, 0x7b7: 0x00c2, 0x7b8: 0x00c2, 0x7b9: 0x00c4, 0x7ba: 0x00c2, 0x7bb: 0x00c2, + 0x7bc: 0x00c2, 0x7bd: 0x00c2, + // Block 0x1f, offset 0x7c0 + 0x7d4: 0x00c3, 0x7d5: 0x00c3, 0x7d6: 0x00c3, 0x7d7: 0x00c3, + 0x7d8: 0x00c3, 0x7d9: 0x00c3, 0x7da: 0x00c3, 0x7db: 0x00c3, 0x7dc: 0x00c3, 0x7dd: 0x00c3, + 0x7de: 0x00c3, 0x7df: 0x00c3, 0x7e0: 0x00c3, 0x7e1: 0x00c3, 0x7e2: 0x0040, 0x7e3: 0x00c3, + 0x7e4: 0x00c3, 0x7e5: 0x00c3, 0x7e6: 0x00c3, 0x7e7: 0x00c3, 0x7e8: 0x00c3, 0x7e9: 0x00c3, + 0x7ea: 0x00c3, 0x7eb: 0x00c3, 0x7ec: 0x00c3, 0x7ed: 0x00c3, 0x7ee: 0x00c3, 0x7ef: 0x00c3, + 0x7f0: 0x00c3, 0x7f1: 0x00c3, 0x7f2: 0x00c3, 0x7f3: 0x00c3, 0x7f4: 0x00c3, 0x7f5: 0x00c3, + 0x7f6: 0x00c3, 0x7f7: 0x00c3, 0x7f8: 0x00c3, 0x7f9: 0x00c3, 0x7fa: 0x00c3, 0x7fb: 0x00c3, + 0x7fc: 0x00c3, 0x7fd: 0x00c3, 0x7fe: 0x00c3, 0x7ff: 0x00c3, + // Block 0x20, offset 0x800 + 0x800: 0x00c3, 0x801: 0x00c3, 0x802: 0x00c3, 0x803: 0x00c0, 0x804: 0x00c0, 0x805: 0x00c0, + 0x806: 0x00c0, 0x807: 0x00c0, 0x808: 0x00c0, 0x809: 0x00c0, 0x80a: 0x00c0, 0x80b: 0x00c0, + 0x80c: 0x00c0, 0x80d: 0x00c0, 0x80e: 0x00c0, 0x80f: 0x00c0, 0x810: 0x00c0, 0x811: 0x00c0, + 0x812: 0x00c0, 0x813: 0x00c0, 0x814: 0x00c0, 0x815: 0x00c0, 0x816: 0x00c0, 0x817: 0x00c0, + 0x818: 0x00c0, 0x819: 0x00c0, 0x81a: 0x00c0, 0x81b: 0x00c0, 0x81c: 0x00c0, 0x81d: 0x00c0, + 0x81e: 0x00c0, 0x81f: 0x00c0, 0x820: 0x00c0, 0x821: 0x00c0, 0x822: 0x00c0, 0x823: 0x00c0, + 0x824: 0x00c0, 0x825: 0x00c0, 0x826: 0x00c0, 0x827: 0x00c0, 0x828: 0x00c0, 0x829: 0x00c0, + 0x82a: 0x00c0, 0x82b: 0x00c0, 0x82c: 0x00c0, 0x82d: 0x00c0, 0x82e: 0x00c0, 0x82f: 0x00c0, + 0x830: 0x00c0, 0x831: 0x00c0, 0x832: 0x00c0, 0x833: 0x00c0, 0x834: 0x00c0, 0x835: 0x00c0, + 0x836: 0x00c0, 0x837: 0x00c0, 0x838: 0x00c0, 0x839: 0x00c0, 0x83a: 0x00c3, 0x83b: 0x00c0, + 0x83c: 0x00c3, 0x83d: 0x00c0, 0x83e: 0x00c0, 0x83f: 0x00c0, + // Block 0x21, offset 0x840 + 0x840: 0x00c0, 0x841: 0x00c3, 0x842: 0x00c3, 0x843: 0x00c3, 0x844: 0x00c3, 0x845: 0x00c3, + 0x846: 0x00c3, 0x847: 0x00c3, 0x848: 0x00c3, 0x849: 0x00c0, 0x84a: 0x00c0, 0x84b: 0x00c0, + 0x84c: 0x00c0, 0x84d: 0x00c6, 0x84e: 0x00c0, 0x84f: 0x00c0, 0x850: 0x00c0, 0x851: 0x00c3, + 0x852: 0x00c3, 0x853: 0x00c3, 0x854: 0x00c3, 0x855: 0x00c3, 0x856: 0x00c3, 0x857: 0x00c3, + 0x858: 0x0080, 0x859: 0x0080, 0x85a: 0x0080, 0x85b: 0x0080, 0x85c: 0x0080, 0x85d: 0x0080, + 0x85e: 0x0080, 0x85f: 0x0080, 0x860: 0x00c0, 0x861: 0x00c0, 0x862: 0x00c3, 0x863: 0x00c3, + 0x864: 0x0080, 0x865: 0x0080, 0x866: 0x00c0, 0x867: 0x00c0, 0x868: 0x00c0, 0x869: 0x00c0, + 0x86a: 0x00c0, 0x86b: 0x00c0, 0x86c: 0x00c0, 0x86d: 0x00c0, 0x86e: 0x00c0, 0x86f: 0x00c0, + 0x870: 0x0080, 0x871: 0x00c0, 0x872: 0x00c0, 0x873: 0x00c0, 0x874: 0x00c0, 0x875: 0x00c0, + 0x876: 0x00c0, 0x877: 0x00c0, 0x878: 0x00c0, 0x879: 0x00c0, 0x87a: 0x00c0, 0x87b: 0x00c0, + 0x87c: 0x00c0, 0x87d: 0x00c0, 0x87e: 
0x00c0, 0x87f: 0x00c0, + // Block 0x22, offset 0x880 + 0x880: 0x00c0, 0x881: 0x00c3, 0x882: 0x00c0, 0x883: 0x00c0, 0x885: 0x00c0, + 0x886: 0x00c0, 0x887: 0x00c0, 0x888: 0x00c0, 0x889: 0x00c0, 0x88a: 0x00c0, 0x88b: 0x00c0, + 0x88c: 0x00c0, 0x88f: 0x00c0, 0x890: 0x00c0, + 0x893: 0x00c0, 0x894: 0x00c0, 0x895: 0x00c0, 0x896: 0x00c0, 0x897: 0x00c0, + 0x898: 0x00c0, 0x899: 0x00c0, 0x89a: 0x00c0, 0x89b: 0x00c0, 0x89c: 0x00c0, 0x89d: 0x00c0, + 0x89e: 0x00c0, 0x89f: 0x00c0, 0x8a0: 0x00c0, 0x8a1: 0x00c0, 0x8a2: 0x00c0, 0x8a3: 0x00c0, + 0x8a4: 0x00c0, 0x8a5: 0x00c0, 0x8a6: 0x00c0, 0x8a7: 0x00c0, 0x8a8: 0x00c0, + 0x8aa: 0x00c0, 0x8ab: 0x00c0, 0x8ac: 0x00c0, 0x8ad: 0x00c0, 0x8ae: 0x00c0, 0x8af: 0x00c0, + 0x8b0: 0x00c0, 0x8b2: 0x00c0, + 0x8b6: 0x00c0, 0x8b7: 0x00c0, 0x8b8: 0x00c0, 0x8b9: 0x00c0, + 0x8bc: 0x00c3, 0x8bd: 0x00c0, 0x8be: 0x00c0, 0x8bf: 0x00c0, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x00c0, 0x8c1: 0x00c3, 0x8c2: 0x00c3, 0x8c3: 0x00c3, 0x8c4: 0x00c3, + 0x8c7: 0x00c0, 0x8c8: 0x00c0, 0x8cb: 0x00c0, + 0x8cc: 0x00c0, 0x8cd: 0x00c6, 0x8ce: 0x00c0, + 0x8d7: 0x00c0, + 0x8dc: 0x0080, 0x8dd: 0x0080, + 0x8df: 0x0080, 0x8e0: 0x00c0, 0x8e1: 0x00c0, 0x8e2: 0x00c3, 0x8e3: 0x00c3, + 0x8e6: 0x00c0, 0x8e7: 0x00c0, 0x8e8: 0x00c0, 0x8e9: 0x00c0, + 0x8ea: 0x00c0, 0x8eb: 0x00c0, 0x8ec: 0x00c0, 0x8ed: 0x00c0, 0x8ee: 0x00c0, 0x8ef: 0x00c0, + 0x8f0: 0x00c0, 0x8f1: 0x00c0, 0x8f2: 0x0080, 0x8f3: 0x0080, 0x8f4: 0x0080, 0x8f5: 0x0080, + 0x8f6: 0x0080, 0x8f7: 0x0080, 0x8f8: 0x0080, 0x8f9: 0x0080, 0x8fa: 0x0080, 0x8fb: 0x0080, + // Block 0x24, offset 0x900 + 0x901: 0x00c3, 0x902: 0x00c3, 0x903: 0x00c0, 0x905: 0x00c0, + 0x906: 0x00c0, 0x907: 0x00c0, 0x908: 0x00c0, 0x909: 0x00c0, 0x90a: 0x00c0, + 0x90f: 0x00c0, 0x910: 0x00c0, + 0x913: 0x00c0, 0x914: 0x00c0, 0x915: 0x00c0, 0x916: 0x00c0, 0x917: 0x00c0, + 0x918: 0x00c0, 0x919: 0x00c0, 0x91a: 0x00c0, 0x91b: 0x00c0, 0x91c: 0x00c0, 0x91d: 0x00c0, + 0x91e: 0x00c0, 0x91f: 0x00c0, 0x920: 0x00c0, 0x921: 0x00c0, 0x922: 0x00c0, 0x923: 0x00c0, + 0x924: 0x00c0, 0x925: 0x00c0, 0x926: 0x00c0, 0x927: 0x00c0, 0x928: 0x00c0, + 0x92a: 0x00c0, 0x92b: 0x00c0, 0x92c: 0x00c0, 0x92d: 0x00c0, 0x92e: 0x00c0, 0x92f: 0x00c0, + 0x930: 0x00c0, 0x932: 0x00c0, 0x933: 0x0080, 0x935: 0x00c0, + 0x936: 0x0080, 0x938: 0x00c0, 0x939: 0x00c0, + 0x93c: 0x00c3, 0x93e: 0x00c0, 0x93f: 0x00c0, + // Block 0x25, offset 0x940 + 0x940: 0x00c0, 0x941: 0x00c3, 0x942: 0x00c3, + 0x947: 0x00c3, 0x948: 0x00c3, 0x94b: 0x00c3, + 0x94c: 0x00c3, 0x94d: 0x00c6, 0x951: 0x00c3, + 0x959: 0x0080, 0x95a: 0x0080, 0x95b: 0x0080, 0x95c: 0x00c0, + 0x95e: 0x0080, + 0x966: 0x00c0, 0x967: 0x00c0, 0x968: 0x00c0, 0x969: 0x00c0, + 0x96a: 0x00c0, 0x96b: 0x00c0, 0x96c: 0x00c0, 0x96d: 0x00c0, 0x96e: 0x00c0, 0x96f: 0x00c0, + 0x970: 0x00c3, 0x971: 0x00c3, 0x972: 0x00c0, 0x973: 0x00c0, 0x974: 0x00c0, 0x975: 0x00c3, + // Block 0x26, offset 0x980 + 0x981: 0x00c3, 0x982: 0x00c3, 0x983: 0x00c0, 0x985: 0x00c0, + 0x986: 0x00c0, 0x987: 0x00c0, 0x988: 0x00c0, 0x989: 0x00c0, 0x98a: 0x00c0, 0x98b: 0x00c0, + 0x98c: 0x00c0, 0x98d: 0x00c0, 0x98f: 0x00c0, 0x990: 0x00c0, 0x991: 0x00c0, + 0x993: 0x00c0, 0x994: 0x00c0, 0x995: 0x00c0, 0x996: 0x00c0, 0x997: 0x00c0, + 0x998: 0x00c0, 0x999: 0x00c0, 0x99a: 0x00c0, 0x99b: 0x00c0, 0x99c: 0x00c0, 0x99d: 0x00c0, + 0x99e: 0x00c0, 0x99f: 0x00c0, 0x9a0: 0x00c0, 0x9a1: 0x00c0, 0x9a2: 0x00c0, 0x9a3: 0x00c0, + 0x9a4: 0x00c0, 0x9a5: 0x00c0, 0x9a6: 0x00c0, 0x9a7: 0x00c0, 0x9a8: 0x00c0, + 0x9aa: 0x00c0, 0x9ab: 0x00c0, 0x9ac: 0x00c0, 0x9ad: 0x00c0, 0x9ae: 0x00c0, 0x9af: 0x00c0, + 0x9b0: 0x00c0, 0x9b2: 0x00c0, 0x9b3: 0x00c0, 0x9b5: 
0x00c0, + 0x9b6: 0x00c0, 0x9b7: 0x00c0, 0x9b8: 0x00c0, 0x9b9: 0x00c0, + 0x9bc: 0x00c3, 0x9bd: 0x00c0, 0x9be: 0x00c0, 0x9bf: 0x00c0, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x00c0, 0x9c1: 0x00c3, 0x9c2: 0x00c3, 0x9c3: 0x00c3, 0x9c4: 0x00c3, 0x9c5: 0x00c3, + 0x9c7: 0x00c3, 0x9c8: 0x00c3, 0x9c9: 0x00c0, 0x9cb: 0x00c0, + 0x9cc: 0x00c0, 0x9cd: 0x00c6, 0x9d0: 0x00c0, + 0x9e0: 0x00c0, 0x9e1: 0x00c0, 0x9e2: 0x00c3, 0x9e3: 0x00c3, + 0x9e6: 0x00c0, 0x9e7: 0x00c0, 0x9e8: 0x00c0, 0x9e9: 0x00c0, + 0x9ea: 0x00c0, 0x9eb: 0x00c0, 0x9ec: 0x00c0, 0x9ed: 0x00c0, 0x9ee: 0x00c0, 0x9ef: 0x00c0, + 0x9f0: 0x0080, 0x9f1: 0x0080, + 0x9f9: 0x00c0, + // Block 0x28, offset 0xa00 + 0xa01: 0x00c3, 0xa02: 0x00c0, 0xa03: 0x00c0, 0xa05: 0x00c0, + 0xa06: 0x00c0, 0xa07: 0x00c0, 0xa08: 0x00c0, 0xa09: 0x00c0, 0xa0a: 0x00c0, 0xa0b: 0x00c0, + 0xa0c: 0x00c0, 0xa0f: 0x00c0, 0xa10: 0x00c0, + 0xa13: 0x00c0, 0xa14: 0x00c0, 0xa15: 0x00c0, 0xa16: 0x00c0, 0xa17: 0x00c0, + 0xa18: 0x00c0, 0xa19: 0x00c0, 0xa1a: 0x00c0, 0xa1b: 0x00c0, 0xa1c: 0x00c0, 0xa1d: 0x00c0, + 0xa1e: 0x00c0, 0xa1f: 0x00c0, 0xa20: 0x00c0, 0xa21: 0x00c0, 0xa22: 0x00c0, 0xa23: 0x00c0, + 0xa24: 0x00c0, 0xa25: 0x00c0, 0xa26: 0x00c0, 0xa27: 0x00c0, 0xa28: 0x00c0, + 0xa2a: 0x00c0, 0xa2b: 0x00c0, 0xa2c: 0x00c0, 0xa2d: 0x00c0, 0xa2e: 0x00c0, 0xa2f: 0x00c0, + 0xa30: 0x00c0, 0xa32: 0x00c0, 0xa33: 0x00c0, 0xa35: 0x00c0, + 0xa36: 0x00c0, 0xa37: 0x00c0, 0xa38: 0x00c0, 0xa39: 0x00c0, + 0xa3c: 0x00c3, 0xa3d: 0x00c0, 0xa3e: 0x00c0, 0xa3f: 0x00c3, + // Block 0x29, offset 0xa40 + 0xa40: 0x00c0, 0xa41: 0x00c3, 0xa42: 0x00c3, 0xa43: 0x00c3, 0xa44: 0x00c3, + 0xa47: 0x00c0, 0xa48: 0x00c0, 0xa4b: 0x00c0, + 0xa4c: 0x00c0, 0xa4d: 0x00c6, + 0xa56: 0x00c3, 0xa57: 0x00c0, + 0xa5c: 0x0080, 0xa5d: 0x0080, + 0xa5f: 0x00c0, 0xa60: 0x00c0, 0xa61: 0x00c0, 0xa62: 0x00c3, 0xa63: 0x00c3, + 0xa66: 0x00c0, 0xa67: 0x00c0, 0xa68: 0x00c0, 0xa69: 0x00c0, + 0xa6a: 0x00c0, 0xa6b: 0x00c0, 0xa6c: 0x00c0, 0xa6d: 0x00c0, 0xa6e: 0x00c0, 0xa6f: 0x00c0, + 0xa70: 0x0080, 0xa71: 0x00c0, 0xa72: 0x0080, 0xa73: 0x0080, 0xa74: 0x0080, 0xa75: 0x0080, + 0xa76: 0x0080, 0xa77: 0x0080, + // Block 0x2a, offset 0xa80 + 0xa82: 0x00c3, 0xa83: 0x00c0, 0xa85: 0x00c0, + 0xa86: 0x00c0, 0xa87: 0x00c0, 0xa88: 0x00c0, 0xa89: 0x00c0, 0xa8a: 0x00c0, + 0xa8e: 0x00c0, 0xa8f: 0x00c0, 0xa90: 0x00c0, + 0xa92: 0x00c0, 0xa93: 0x00c0, 0xa94: 0x00c0, 0xa95: 0x00c0, + 0xa99: 0x00c0, 0xa9a: 0x00c0, 0xa9c: 0x00c0, + 0xa9e: 0x00c0, 0xa9f: 0x00c0, 0xaa3: 0x00c0, + 0xaa4: 0x00c0, 0xaa8: 0x00c0, 0xaa9: 0x00c0, + 0xaaa: 0x00c0, 0xaae: 0x00c0, 0xaaf: 0x00c0, + 0xab0: 0x00c0, 0xab1: 0x00c0, 0xab2: 0x00c0, 0xab3: 0x00c0, 0xab4: 0x00c0, 0xab5: 0x00c0, + 0xab6: 0x00c0, 0xab7: 0x00c0, 0xab8: 0x00c0, 0xab9: 0x00c0, + 0xabe: 0x00c0, 0xabf: 0x00c0, + // Block 0x2b, offset 0xac0 + 0xac0: 0x00c3, 0xac1: 0x00c0, 0xac2: 0x00c0, + 0xac6: 0x00c0, 0xac7: 0x00c0, 0xac8: 0x00c0, 0xaca: 0x00c0, 0xacb: 0x00c0, + 0xacc: 0x00c0, 0xacd: 0x00c6, 0xad0: 0x00c0, + 0xad7: 0x00c0, + 0xae6: 0x00c0, 0xae7: 0x00c0, 0xae8: 0x00c0, 0xae9: 0x00c0, + 0xaea: 0x00c0, 0xaeb: 0x00c0, 0xaec: 0x00c0, 0xaed: 0x00c0, 0xaee: 0x00c0, 0xaef: 0x00c0, + 0xaf0: 0x0080, 0xaf1: 0x0080, 0xaf2: 0x0080, 0xaf3: 0x0080, 0xaf4: 0x0080, 0xaf5: 0x0080, + 0xaf6: 0x0080, 0xaf7: 0x0080, 0xaf8: 0x0080, 0xaf9: 0x0080, 0xafa: 0x0080, + // Block 0x2c, offset 0xb00 + 0xb00: 0x00c3, 0xb01: 0x00c0, 0xb02: 0x00c0, 0xb03: 0x00c0, 0xb05: 0x00c0, + 0xb06: 0x00c0, 0xb07: 0x00c0, 0xb08: 0x00c0, 0xb09: 0x00c0, 0xb0a: 0x00c0, 0xb0b: 0x00c0, + 0xb0c: 0x00c0, 0xb0e: 0x00c0, 0xb0f: 0x00c0, 0xb10: 0x00c0, + 0xb12: 0x00c0, 0xb13: 0x00c0, 
0xb14: 0x00c0, 0xb15: 0x00c0, 0xb16: 0x00c0, 0xb17: 0x00c0, + 0xb18: 0x00c0, 0xb19: 0x00c0, 0xb1a: 0x00c0, 0xb1b: 0x00c0, 0xb1c: 0x00c0, 0xb1d: 0x00c0, + 0xb1e: 0x00c0, 0xb1f: 0x00c0, 0xb20: 0x00c0, 0xb21: 0x00c0, 0xb22: 0x00c0, 0xb23: 0x00c0, + 0xb24: 0x00c0, 0xb25: 0x00c0, 0xb26: 0x00c0, 0xb27: 0x00c0, 0xb28: 0x00c0, + 0xb2a: 0x00c0, 0xb2b: 0x00c0, 0xb2c: 0x00c0, 0xb2d: 0x00c0, 0xb2e: 0x00c0, 0xb2f: 0x00c0, + 0xb30: 0x00c0, 0xb31: 0x00c0, 0xb32: 0x00c0, 0xb33: 0x00c0, 0xb34: 0x00c0, 0xb35: 0x00c0, + 0xb36: 0x00c0, 0xb37: 0x00c0, 0xb38: 0x00c0, 0xb39: 0x00c0, + 0xb3d: 0x00c0, 0xb3e: 0x00c3, 0xb3f: 0x00c3, + // Block 0x2d, offset 0xb40 + 0xb40: 0x00c3, 0xb41: 0x00c0, 0xb42: 0x00c0, 0xb43: 0x00c0, 0xb44: 0x00c0, + 0xb46: 0x00c3, 0xb47: 0x00c3, 0xb48: 0x00c3, 0xb4a: 0x00c3, 0xb4b: 0x00c3, + 0xb4c: 0x00c3, 0xb4d: 0x00c6, + 0xb55: 0x00c3, 0xb56: 0x00c3, + 0xb58: 0x00c0, 0xb59: 0x00c0, 0xb5a: 0x00c0, + 0xb60: 0x00c0, 0xb61: 0x00c0, 0xb62: 0x00c3, 0xb63: 0x00c3, + 0xb66: 0x00c0, 0xb67: 0x00c0, 0xb68: 0x00c0, 0xb69: 0x00c0, + 0xb6a: 0x00c0, 0xb6b: 0x00c0, 0xb6c: 0x00c0, 0xb6d: 0x00c0, 0xb6e: 0x00c0, 0xb6f: 0x00c0, + 0xb78: 0x0080, 0xb79: 0x0080, 0xb7a: 0x0080, 0xb7b: 0x0080, + 0xb7c: 0x0080, 0xb7d: 0x0080, 0xb7e: 0x0080, 0xb7f: 0x0080, + // Block 0x2e, offset 0xb80 + 0xb80: 0x00c0, 0xb81: 0x00c3, 0xb82: 0x00c0, 0xb83: 0x00c0, 0xb85: 0x00c0, + 0xb86: 0x00c0, 0xb87: 0x00c0, 0xb88: 0x00c0, 0xb89: 0x00c0, 0xb8a: 0x00c0, 0xb8b: 0x00c0, + 0xb8c: 0x00c0, 0xb8e: 0x00c0, 0xb8f: 0x00c0, 0xb90: 0x00c0, + 0xb92: 0x00c0, 0xb93: 0x00c0, 0xb94: 0x00c0, 0xb95: 0x00c0, 0xb96: 0x00c0, 0xb97: 0x00c0, + 0xb98: 0x00c0, 0xb99: 0x00c0, 0xb9a: 0x00c0, 0xb9b: 0x00c0, 0xb9c: 0x00c0, 0xb9d: 0x00c0, + 0xb9e: 0x00c0, 0xb9f: 0x00c0, 0xba0: 0x00c0, 0xba1: 0x00c0, 0xba2: 0x00c0, 0xba3: 0x00c0, + 0xba4: 0x00c0, 0xba5: 0x00c0, 0xba6: 0x00c0, 0xba7: 0x00c0, 0xba8: 0x00c0, + 0xbaa: 0x00c0, 0xbab: 0x00c0, 0xbac: 0x00c0, 0xbad: 0x00c0, 0xbae: 0x00c0, 0xbaf: 0x00c0, + 0xbb0: 0x00c0, 0xbb1: 0x00c0, 0xbb2: 0x00c0, 0xbb3: 0x00c0, 0xbb5: 0x00c0, + 0xbb6: 0x00c0, 0xbb7: 0x00c0, 0xbb8: 0x00c0, 0xbb9: 0x00c0, + 0xbbc: 0x00c3, 0xbbd: 0x00c0, 0xbbe: 0x00c0, 0xbbf: 0x00c3, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x00c0, 0xbc1: 0x00c0, 0xbc2: 0x00c0, 0xbc3: 0x00c0, 0xbc4: 0x00c0, + 0xbc6: 0x00c3, 0xbc7: 0x00c0, 0xbc8: 0x00c0, 0xbca: 0x00c0, 0xbcb: 0x00c0, + 0xbcc: 0x00c3, 0xbcd: 0x00c6, + 0xbd5: 0x00c0, 0xbd6: 0x00c0, + 0xbde: 0x00c0, 0xbe0: 0x00c0, 0xbe1: 0x00c0, 0xbe2: 0x00c3, 0xbe3: 0x00c3, + 0xbe6: 0x00c0, 0xbe7: 0x00c0, 0xbe8: 0x00c0, 0xbe9: 0x00c0, + 0xbea: 0x00c0, 0xbeb: 0x00c0, 0xbec: 0x00c0, 0xbed: 0x00c0, 0xbee: 0x00c0, 0xbef: 0x00c0, + 0xbf1: 0x00c0, 0xbf2: 0x00c0, + // Block 0x30, offset 0xc00 + 0xc01: 0x00c3, 0xc02: 0x00c0, 0xc03: 0x00c0, 0xc05: 0x00c0, + 0xc06: 0x00c0, 0xc07: 0x00c0, 0xc08: 0x00c0, 0xc09: 0x00c0, 0xc0a: 0x00c0, 0xc0b: 0x00c0, + 0xc0c: 0x00c0, 0xc0e: 0x00c0, 0xc0f: 0x00c0, 0xc10: 0x00c0, + 0xc12: 0x00c0, 0xc13: 0x00c0, 0xc14: 0x00c0, 0xc15: 0x00c0, 0xc16: 0x00c0, 0xc17: 0x00c0, + 0xc18: 0x00c0, 0xc19: 0x00c0, 0xc1a: 0x00c0, 0xc1b: 0x00c0, 0xc1c: 0x00c0, 0xc1d: 0x00c0, + 0xc1e: 0x00c0, 0xc1f: 0x00c0, 0xc20: 0x00c0, 0xc21: 0x00c0, 0xc22: 0x00c0, 0xc23: 0x00c0, + 0xc24: 0x00c0, 0xc25: 0x00c0, 0xc26: 0x00c0, 0xc27: 0x00c0, 0xc28: 0x00c0, 0xc29: 0x00c0, + 0xc2a: 0x00c0, 0xc2b: 0x00c0, 0xc2c: 0x00c0, 0xc2d: 0x00c0, 0xc2e: 0x00c0, 0xc2f: 0x00c0, + 0xc30: 0x00c0, 0xc31: 0x00c0, 0xc32: 0x00c0, 0xc33: 0x00c0, 0xc34: 0x00c0, 0xc35: 0x00c0, + 0xc36: 0x00c0, 0xc37: 0x00c0, 0xc38: 0x00c0, 0xc39: 0x00c0, 0xc3a: 0x00c0, + 0xc3d: 
0x00c0, 0xc3e: 0x00c0, 0xc3f: 0x00c0, + // Block 0x31, offset 0xc40 + 0xc40: 0x00c0, 0xc41: 0x00c3, 0xc42: 0x00c3, 0xc43: 0x00c3, 0xc44: 0x00c3, + 0xc46: 0x00c0, 0xc47: 0x00c0, 0xc48: 0x00c0, 0xc4a: 0x00c0, 0xc4b: 0x00c0, + 0xc4c: 0x00c0, 0xc4d: 0x00c6, 0xc4e: 0x00c0, 0xc4f: 0x0080, + 0xc54: 0x00c0, 0xc55: 0x00c0, 0xc56: 0x00c0, 0xc57: 0x00c0, + 0xc58: 0x0080, 0xc59: 0x0080, 0xc5a: 0x0080, 0xc5b: 0x0080, 0xc5c: 0x0080, 0xc5d: 0x0080, + 0xc5e: 0x0080, 0xc5f: 0x00c0, 0xc60: 0x00c0, 0xc61: 0x00c0, 0xc62: 0x00c3, 0xc63: 0x00c3, + 0xc66: 0x00c0, 0xc67: 0x00c0, 0xc68: 0x00c0, 0xc69: 0x00c0, + 0xc6a: 0x00c0, 0xc6b: 0x00c0, 0xc6c: 0x00c0, 0xc6d: 0x00c0, 0xc6e: 0x00c0, 0xc6f: 0x00c0, + 0xc70: 0x0080, 0xc71: 0x0080, 0xc72: 0x0080, 0xc73: 0x0080, 0xc74: 0x0080, 0xc75: 0x0080, + 0xc76: 0x0080, 0xc77: 0x0080, 0xc78: 0x0080, 0xc79: 0x0080, 0xc7a: 0x00c0, 0xc7b: 0x00c0, + 0xc7c: 0x00c0, 0xc7d: 0x00c0, 0xc7e: 0x00c0, 0xc7f: 0x00c0, + // Block 0x32, offset 0xc80 + 0xc82: 0x00c0, 0xc83: 0x00c0, 0xc85: 0x00c0, + 0xc86: 0x00c0, 0xc87: 0x00c0, 0xc88: 0x00c0, 0xc89: 0x00c0, 0xc8a: 0x00c0, 0xc8b: 0x00c0, + 0xc8c: 0x00c0, 0xc8d: 0x00c0, 0xc8e: 0x00c0, 0xc8f: 0x00c0, 0xc90: 0x00c0, 0xc91: 0x00c0, + 0xc92: 0x00c0, 0xc93: 0x00c0, 0xc94: 0x00c0, 0xc95: 0x00c0, 0xc96: 0x00c0, + 0xc9a: 0x00c0, 0xc9b: 0x00c0, 0xc9c: 0x00c0, 0xc9d: 0x00c0, + 0xc9e: 0x00c0, 0xc9f: 0x00c0, 0xca0: 0x00c0, 0xca1: 0x00c0, 0xca2: 0x00c0, 0xca3: 0x00c0, + 0xca4: 0x00c0, 0xca5: 0x00c0, 0xca6: 0x00c0, 0xca7: 0x00c0, 0xca8: 0x00c0, 0xca9: 0x00c0, + 0xcaa: 0x00c0, 0xcab: 0x00c0, 0xcac: 0x00c0, 0xcad: 0x00c0, 0xcae: 0x00c0, 0xcaf: 0x00c0, + 0xcb0: 0x00c0, 0xcb1: 0x00c0, 0xcb3: 0x00c0, 0xcb4: 0x00c0, 0xcb5: 0x00c0, + 0xcb6: 0x00c0, 0xcb7: 0x00c0, 0xcb8: 0x00c0, 0xcb9: 0x00c0, 0xcba: 0x00c0, 0xcbb: 0x00c0, + 0xcbd: 0x00c0, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x00c0, 0xcc1: 0x00c0, 0xcc2: 0x00c0, 0xcc3: 0x00c0, 0xcc4: 0x00c0, 0xcc5: 0x00c0, + 0xcc6: 0x00c0, 0xcca: 0x00c6, + 0xccf: 0x00c0, 0xcd0: 0x00c0, 0xcd1: 0x00c0, + 0xcd2: 0x00c3, 0xcd3: 0x00c3, 0xcd4: 0x00c3, 0xcd6: 0x00c3, + 0xcd8: 0x00c0, 0xcd9: 0x00c0, 0xcda: 0x00c0, 0xcdb: 0x00c0, 0xcdc: 0x00c0, 0xcdd: 0x00c0, + 0xcde: 0x00c0, 0xcdf: 0x00c0, + 0xce6: 0x00c0, 0xce7: 0x00c0, 0xce8: 0x00c0, 0xce9: 0x00c0, + 0xcea: 0x00c0, 0xceb: 0x00c0, 0xcec: 0x00c0, 0xced: 0x00c0, 0xcee: 0x00c0, 0xcef: 0x00c0, + 0xcf2: 0x00c0, 0xcf3: 0x00c0, 0xcf4: 0x0080, + // Block 0x34, offset 0xd00 + 0xd01: 0x00c0, 0xd02: 0x00c0, 0xd03: 0x00c0, 0xd04: 0x00c0, 0xd05: 0x00c0, + 0xd06: 0x00c0, 0xd07: 0x00c0, 0xd08: 0x00c0, 0xd09: 0x00c0, 0xd0a: 0x00c0, 0xd0b: 0x00c0, + 0xd0c: 0x00c0, 0xd0d: 0x00c0, 0xd0e: 0x00c0, 0xd0f: 0x00c0, 0xd10: 0x00c0, 0xd11: 0x00c0, + 0xd12: 0x00c0, 0xd13: 0x00c0, 0xd14: 0x00c0, 0xd15: 0x00c0, 0xd16: 0x00c0, 0xd17: 0x00c0, + 0xd18: 0x00c0, 0xd19: 0x00c0, 0xd1a: 0x00c0, 0xd1b: 0x00c0, 0xd1c: 0x00c0, 0xd1d: 0x00c0, + 0xd1e: 0x00c0, 0xd1f: 0x00c0, 0xd20: 0x00c0, 0xd21: 0x00c0, 0xd22: 0x00c0, 0xd23: 0x00c0, + 0xd24: 0x00c0, 0xd25: 0x00c0, 0xd26: 0x00c0, 0xd27: 0x00c0, 0xd28: 0x00c0, 0xd29: 0x00c0, + 0xd2a: 0x00c0, 0xd2b: 0x00c0, 0xd2c: 0x00c0, 0xd2d: 0x00c0, 0xd2e: 0x00c0, 0xd2f: 0x00c0, + 0xd30: 0x00c0, 0xd31: 0x00c3, 0xd32: 0x00c0, 0xd33: 0x0080, 0xd34: 0x00c3, 0xd35: 0x00c3, + 0xd36: 0x00c3, 0xd37: 0x00c3, 0xd38: 0x00c3, 0xd39: 0x00c3, 0xd3a: 0x00c6, + 0xd3f: 0x0080, + // Block 0x35, offset 0xd40 + 0xd40: 0x00c0, 0xd41: 0x00c0, 0xd42: 0x00c0, 0xd43: 0x00c0, 0xd44: 0x00c0, 0xd45: 0x00c0, + 0xd46: 0x00c0, 0xd47: 0x00c3, 0xd48: 0x00c3, 0xd49: 0x00c3, 0xd4a: 0x00c3, 0xd4b: 0x00c3, + 0xd4c: 0x00c3, 0xd4d: 
0x00c3, 0xd4e: 0x00c3, 0xd4f: 0x0080, 0xd50: 0x00c0, 0xd51: 0x00c0, + 0xd52: 0x00c0, 0xd53: 0x00c0, 0xd54: 0x00c0, 0xd55: 0x00c0, 0xd56: 0x00c0, 0xd57: 0x00c0, + 0xd58: 0x00c0, 0xd59: 0x00c0, 0xd5a: 0x0080, 0xd5b: 0x0080, + // Block 0x36, offset 0xd80 + 0xd81: 0x00c0, 0xd82: 0x00c0, 0xd84: 0x00c0, + 0xd87: 0x00c0, 0xd88: 0x00c0, 0xd8a: 0x00c0, + 0xd8d: 0x00c0, + 0xd94: 0x00c0, 0xd95: 0x00c0, 0xd96: 0x00c0, 0xd97: 0x00c0, + 0xd99: 0x00c0, 0xd9a: 0x00c0, 0xd9b: 0x00c0, 0xd9c: 0x00c0, 0xd9d: 0x00c0, + 0xd9e: 0x00c0, 0xd9f: 0x00c0, 0xda1: 0x00c0, 0xda2: 0x00c0, 0xda3: 0x00c0, + 0xda5: 0x00c0, 0xda7: 0x00c0, + 0xdaa: 0x00c0, 0xdab: 0x00c0, 0xdad: 0x00c0, 0xdae: 0x00c0, 0xdaf: 0x00c0, + 0xdb0: 0x00c0, 0xdb1: 0x00c3, 0xdb2: 0x00c0, 0xdb3: 0x0080, 0xdb4: 0x00c3, 0xdb5: 0x00c3, + 0xdb6: 0x00c3, 0xdb7: 0x00c3, 0xdb8: 0x00c3, 0xdb9: 0x00c3, 0xdbb: 0x00c3, + 0xdbc: 0x00c3, 0xdbd: 0x00c0, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x00c0, 0xdc1: 0x00c0, 0xdc2: 0x00c0, 0xdc3: 0x00c0, 0xdc4: 0x00c0, + 0xdc6: 0x00c0, 0xdc8: 0x00c3, 0xdc9: 0x00c3, 0xdca: 0x00c3, 0xdcb: 0x00c3, + 0xdcc: 0x00c3, 0xdcd: 0x00c3, 0xdd0: 0x00c0, 0xdd1: 0x00c0, + 0xdd2: 0x00c0, 0xdd3: 0x00c0, 0xdd4: 0x00c0, 0xdd5: 0x00c0, 0xdd6: 0x00c0, 0xdd7: 0x00c0, + 0xdd8: 0x00c0, 0xdd9: 0x00c0, 0xddc: 0x0080, 0xddd: 0x0080, + 0xdde: 0x00c0, 0xddf: 0x00c0, + // Block 0x38, offset 0xe00 + 0xe00: 0x00c0, 0xe01: 0x0080, 0xe02: 0x0080, 0xe03: 0x0080, 0xe04: 0x0080, 0xe05: 0x0080, + 0xe06: 0x0080, 0xe07: 0x0080, 0xe08: 0x0080, 0xe09: 0x0080, 0xe0a: 0x0080, 0xe0b: 0x00c0, + 0xe0c: 0x0080, 0xe0d: 0x0080, 0xe0e: 0x0080, 0xe0f: 0x0080, 0xe10: 0x0080, 0xe11: 0x0080, + 0xe12: 0x0080, 0xe13: 0x0080, 0xe14: 0x0080, 0xe15: 0x0080, 0xe16: 0x0080, 0xe17: 0x0080, + 0xe18: 0x00c3, 0xe19: 0x00c3, 0xe1a: 0x0080, 0xe1b: 0x0080, 0xe1c: 0x0080, 0xe1d: 0x0080, + 0xe1e: 0x0080, 0xe1f: 0x0080, 0xe20: 0x00c0, 0xe21: 0x00c0, 0xe22: 0x00c0, 0xe23: 0x00c0, + 0xe24: 0x00c0, 0xe25: 0x00c0, 0xe26: 0x00c0, 0xe27: 0x00c0, 0xe28: 0x00c0, 0xe29: 0x00c0, + 0xe2a: 0x0080, 0xe2b: 0x0080, 0xe2c: 0x0080, 0xe2d: 0x0080, 0xe2e: 0x0080, 0xe2f: 0x0080, + 0xe30: 0x0080, 0xe31: 0x0080, 0xe32: 0x0080, 0xe33: 0x0080, 0xe34: 0x0080, 0xe35: 0x00c3, + 0xe36: 0x0080, 0xe37: 0x00c3, 0xe38: 0x0080, 0xe39: 0x00c3, 0xe3a: 0x0080, 0xe3b: 0x0080, + 0xe3c: 0x0080, 0xe3d: 0x0080, 0xe3e: 0x00c0, 0xe3f: 0x00c0, + // Block 0x39, offset 0xe40 + 0xe40: 0x00c0, 0xe41: 0x00c0, 0xe42: 0x00c0, 0xe43: 0x0080, 0xe44: 0x00c0, 0xe45: 0x00c0, + 0xe46: 0x00c0, 0xe47: 0x00c0, 0xe49: 0x00c0, 0xe4a: 0x00c0, 0xe4b: 0x00c0, + 0xe4c: 0x00c0, 0xe4d: 0x0080, 0xe4e: 0x00c0, 0xe4f: 0x00c0, 0xe50: 0x00c0, 0xe51: 0x00c0, + 0xe52: 0x0080, 0xe53: 0x00c0, 0xe54: 0x00c0, 0xe55: 0x00c0, 0xe56: 0x00c0, 0xe57: 0x0080, + 0xe58: 0x00c0, 0xe59: 0x00c0, 0xe5a: 0x00c0, 0xe5b: 0x00c0, 0xe5c: 0x0080, 0xe5d: 0x00c0, + 0xe5e: 0x00c0, 0xe5f: 0x00c0, 0xe60: 0x00c0, 0xe61: 0x00c0, 0xe62: 0x00c0, 0xe63: 0x00c0, + 0xe64: 0x00c0, 0xe65: 0x00c0, 0xe66: 0x00c0, 0xe67: 0x00c0, 0xe68: 0x00c0, 0xe69: 0x0080, + 0xe6a: 0x00c0, 0xe6b: 0x00c0, 0xe6c: 0x00c0, + 0xe71: 0x00c3, 0xe72: 0x00c3, 0xe73: 0x0083, 0xe74: 0x00c3, 0xe75: 0x0083, + 0xe76: 0x0083, 0xe77: 0x0083, 0xe78: 0x0083, 0xe79: 0x0083, 0xe7a: 0x00c3, 0xe7b: 0x00c3, + 0xe7c: 0x00c3, 0xe7d: 0x00c3, 0xe7e: 0x00c3, 0xe7f: 0x00c0, + // Block 0x3a, offset 0xe80 + 0xe80: 0x00c3, 0xe81: 0x0083, 0xe82: 0x00c3, 0xe83: 0x00c3, 0xe84: 0x00c6, 0xe85: 0x0080, + 0xe86: 0x00c3, 0xe87: 0x00c3, 0xe88: 0x00c0, 0xe89: 0x00c0, 0xe8a: 0x00c0, 0xe8b: 0x00c0, + 0xe8c: 0x00c0, 0xe8d: 0x00c3, 0xe8e: 0x00c3, 0xe8f: 0x00c3, 0xe90: 
0x00c3, 0xe91: 0x00c3, + 0xe92: 0x00c3, 0xe93: 0x0083, 0xe94: 0x00c3, 0xe95: 0x00c3, 0xe96: 0x00c3, 0xe97: 0x00c3, + 0xe99: 0x00c3, 0xe9a: 0x00c3, 0xe9b: 0x00c3, 0xe9c: 0x00c3, 0xe9d: 0x0083, + 0xe9e: 0x00c3, 0xe9f: 0x00c3, 0xea0: 0x00c3, 0xea1: 0x00c3, 0xea2: 0x0083, 0xea3: 0x00c3, + 0xea4: 0x00c3, 0xea5: 0x00c3, 0xea6: 0x00c3, 0xea7: 0x0083, 0xea8: 0x00c3, 0xea9: 0x00c3, + 0xeaa: 0x00c3, 0xeab: 0x00c3, 0xeac: 0x0083, 0xead: 0x00c3, 0xeae: 0x00c3, 0xeaf: 0x00c3, + 0xeb0: 0x00c3, 0xeb1: 0x00c3, 0xeb2: 0x00c3, 0xeb3: 0x00c3, 0xeb4: 0x00c3, 0xeb5: 0x00c3, + 0xeb6: 0x00c3, 0xeb7: 0x00c3, 0xeb8: 0x00c3, 0xeb9: 0x0083, 0xeba: 0x00c3, 0xebb: 0x00c3, + 0xebc: 0x00c3, 0xebe: 0x0080, 0xebf: 0x0080, + // Block 0x3b, offset 0xec0 + 0xec0: 0x0080, 0xec1: 0x0080, 0xec2: 0x0080, 0xec3: 0x0080, 0xec4: 0x0080, 0xec5: 0x0080, + 0xec6: 0x00c3, 0xec7: 0x0080, 0xec8: 0x0080, 0xec9: 0x0080, 0xeca: 0x0080, 0xecb: 0x0080, + 0xecc: 0x0080, 0xece: 0x0080, 0xecf: 0x0080, 0xed0: 0x0080, 0xed1: 0x0080, + 0xed2: 0x0080, 0xed3: 0x0080, 0xed4: 0x0080, 0xed5: 0x0080, 0xed6: 0x0080, 0xed7: 0x0080, + 0xed8: 0x0080, 0xed9: 0x0080, 0xeda: 0x0080, + // Block 0x3c, offset 0xf00 + 0xf00: 0x00c0, 0xf01: 0x00c0, 0xf02: 0x00c0, 0xf03: 0x00c0, 0xf04: 0x00c0, 0xf05: 0x00c0, + 0xf06: 0x00c0, 0xf07: 0x00c0, 0xf08: 0x00c0, 0xf09: 0x00c0, 0xf0a: 0x00c0, 0xf0b: 0x00c0, + 0xf0c: 0x00c0, 0xf0d: 0x00c0, 0xf0e: 0x00c0, 0xf0f: 0x00c0, 0xf10: 0x00c0, 0xf11: 0x00c0, + 0xf12: 0x00c0, 0xf13: 0x00c0, 0xf14: 0x00c0, 0xf15: 0x00c0, 0xf16: 0x00c0, 0xf17: 0x00c0, + 0xf18: 0x00c0, 0xf19: 0x00c0, 0xf1a: 0x00c0, 0xf1b: 0x00c0, 0xf1c: 0x00c0, 0xf1d: 0x00c0, + 0xf1e: 0x00c0, 0xf1f: 0x00c0, 0xf20: 0x00c0, 0xf21: 0x00c0, 0xf22: 0x00c0, 0xf23: 0x00c0, + 0xf24: 0x00c0, 0xf25: 0x00c0, 0xf26: 0x00c0, 0xf27: 0x00c0, 0xf28: 0x00c0, 0xf29: 0x00c0, + 0xf2a: 0x00c0, 0xf2b: 0x00c0, 0xf2c: 0x00c0, 0xf2d: 0x00c3, 0xf2e: 0x00c3, 0xf2f: 0x00c3, + 0xf30: 0x00c3, 0xf31: 0x00c0, 0xf32: 0x00c3, 0xf33: 0x00c3, 0xf34: 0x00c3, 0xf35: 0x00c3, + 0xf36: 0x00c3, 0xf37: 0x00c3, 0xf38: 0x00c0, 0xf39: 0x00c6, 0xf3a: 0x00c6, 0xf3b: 0x00c0, + 0xf3c: 0x00c0, 0xf3d: 0x00c3, 0xf3e: 0x00c3, 0xf3f: 0x00c0, + // Block 0x3d, offset 0xf40 + 0xf40: 0x00c0, 0xf41: 0x00c0, 0xf42: 0x00c0, 0xf43: 0x00c0, 0xf44: 0x00c0, 0xf45: 0x00c0, + 0xf46: 0x00c0, 0xf47: 0x00c0, 0xf48: 0x00c0, 0xf49: 0x00c0, 0xf4a: 0x0080, 0xf4b: 0x0080, + 0xf4c: 0x0080, 0xf4d: 0x0080, 0xf4e: 0x0080, 0xf4f: 0x0080, 0xf50: 0x00c0, 0xf51: 0x00c0, + 0xf52: 0x00c0, 0xf53: 0x00c0, 0xf54: 0x00c0, 0xf55: 0x00c0, 0xf56: 0x00c0, 0xf57: 0x00c0, + 0xf58: 0x00c3, 0xf59: 0x00c3, 0xf5a: 0x00c0, 0xf5b: 0x00c0, 0xf5c: 0x00c0, 0xf5d: 0x00c0, + 0xf5e: 0x00c3, 0xf5f: 0x00c3, 0xf60: 0x00c3, 0xf61: 0x00c0, 0xf62: 0x00c0, 0xf63: 0x00c0, + 0xf64: 0x00c0, 0xf65: 0x00c0, 0xf66: 0x00c0, 0xf67: 0x00c0, 0xf68: 0x00c0, 0xf69: 0x00c0, + 0xf6a: 0x00c0, 0xf6b: 0x00c0, 0xf6c: 0x00c0, 0xf6d: 0x00c0, 0xf6e: 0x00c0, 0xf6f: 0x00c0, + 0xf70: 0x00c0, 0xf71: 0x00c3, 0xf72: 0x00c3, 0xf73: 0x00c3, 0xf74: 0x00c3, 0xf75: 0x00c0, + 0xf76: 0x00c0, 0xf77: 0x00c0, 0xf78: 0x00c0, 0xf79: 0x00c0, 0xf7a: 0x00c0, 0xf7b: 0x00c0, + 0xf7c: 0x00c0, 0xf7d: 0x00c0, 0xf7e: 0x00c0, 0xf7f: 0x00c0, + // Block 0x3e, offset 0xf80 + 0xf80: 0x00c0, 0xf81: 0x00c0, 0xf82: 0x00c3, 0xf83: 0x00c0, 0xf84: 0x00c0, 0xf85: 0x00c3, + 0xf86: 0x00c3, 0xf87: 0x00c0, 0xf88: 0x00c0, 0xf89: 0x00c0, 0xf8a: 0x00c0, 0xf8b: 0x00c0, + 0xf8c: 0x00c0, 0xf8d: 0x00c3, 0xf8e: 0x00c0, 0xf8f: 0x00c0, 0xf90: 0x00c0, 0xf91: 0x00c0, + 0xf92: 0x00c0, 0xf93: 0x00c0, 0xf94: 0x00c0, 0xf95: 0x00c0, 0xf96: 0x00c0, 0xf97: 0x00c0, + 
0xf98: 0x00c0, 0xf99: 0x00c0, 0xf9a: 0x00c0, 0xf9b: 0x00c0, 0xf9c: 0x00c0, 0xf9d: 0x00c3, + 0xf9e: 0x0080, 0xf9f: 0x0080, 0xfa0: 0x00c0, 0xfa1: 0x00c0, 0xfa2: 0x00c0, 0xfa3: 0x00c0, + 0xfa4: 0x00c0, 0xfa5: 0x00c0, 0xfa6: 0x00c0, 0xfa7: 0x00c0, 0xfa8: 0x00c0, 0xfa9: 0x00c0, + 0xfaa: 0x00c0, 0xfab: 0x00c0, 0xfac: 0x00c0, 0xfad: 0x00c0, 0xfae: 0x00c0, 0xfaf: 0x00c0, + 0xfb0: 0x00c0, 0xfb1: 0x00c0, 0xfb2: 0x00c0, 0xfb3: 0x00c0, 0xfb4: 0x00c0, 0xfb5: 0x00c0, + 0xfb6: 0x00c0, 0xfb7: 0x00c0, 0xfb8: 0x00c0, 0xfb9: 0x00c0, 0xfba: 0x00c0, 0xfbb: 0x00c0, + 0xfbc: 0x00c0, 0xfbd: 0x00c0, 0xfbe: 0x00c0, 0xfbf: 0x00c0, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x00c0, 0xfc1: 0x00c0, 0xfc2: 0x00c0, 0xfc3: 0x00c0, 0xfc4: 0x00c0, 0xfc5: 0x00c0, + 0xfc7: 0x00c0, + 0xfcd: 0x00c0, 0xfd0: 0x00c0, 0xfd1: 0x00c0, + 0xfd2: 0x00c0, 0xfd3: 0x00c0, 0xfd4: 0x00c0, 0xfd5: 0x00c0, 0xfd6: 0x00c0, 0xfd7: 0x00c0, + 0xfd8: 0x00c0, 0xfd9: 0x00c0, 0xfda: 0x00c0, 0xfdb: 0x00c0, 0xfdc: 0x00c0, 0xfdd: 0x00c0, + 0xfde: 0x00c0, 0xfdf: 0x00c0, 0xfe0: 0x00c0, 0xfe1: 0x00c0, 0xfe2: 0x00c0, 0xfe3: 0x00c0, + 0xfe4: 0x00c0, 0xfe5: 0x00c0, 0xfe6: 0x00c0, 0xfe7: 0x00c0, 0xfe8: 0x00c0, 0xfe9: 0x00c0, + 0xfea: 0x00c0, 0xfeb: 0x00c0, 0xfec: 0x00c0, 0xfed: 0x00c0, 0xfee: 0x00c0, 0xfef: 0x00c0, + 0xff0: 0x00c0, 0xff1: 0x00c0, 0xff2: 0x00c0, 0xff3: 0x00c0, 0xff4: 0x00c0, 0xff5: 0x00c0, + 0xff6: 0x00c0, 0xff7: 0x00c0, 0xff8: 0x00c0, 0xff9: 0x00c0, 0xffa: 0x00c0, 0xffb: 0x0080, + 0xffc: 0x0080, 0xffd: 0x00c0, 0xffe: 0x00c0, 0xfff: 0x00c0, + // Block 0x40, offset 0x1000 + 0x1000: 0x0040, 0x1001: 0x0040, 0x1002: 0x0040, 0x1003: 0x0040, 0x1004: 0x0040, 0x1005: 0x0040, + 0x1006: 0x0040, 0x1007: 0x0040, 0x1008: 0x0040, 0x1009: 0x0040, 0x100a: 0x0040, 0x100b: 0x0040, + 0x100c: 0x0040, 0x100d: 0x0040, 0x100e: 0x0040, 0x100f: 0x0040, 0x1010: 0x0040, 0x1011: 0x0040, + 0x1012: 0x0040, 0x1013: 0x0040, 0x1014: 0x0040, 0x1015: 0x0040, 0x1016: 0x0040, 0x1017: 0x0040, + 0x1018: 0x0040, 0x1019: 0x0040, 0x101a: 0x0040, 0x101b: 0x0040, 0x101c: 0x0040, 0x101d: 0x0040, + 0x101e: 0x0040, 0x101f: 0x0040, 0x1020: 0x0040, 0x1021: 0x0040, 0x1022: 0x0040, 0x1023: 0x0040, + 0x1024: 0x0040, 0x1025: 0x0040, 0x1026: 0x0040, 0x1027: 0x0040, 0x1028: 0x0040, 0x1029: 0x0040, + 0x102a: 0x0040, 0x102b: 0x0040, 0x102c: 0x0040, 0x102d: 0x0040, 0x102e: 0x0040, 0x102f: 0x0040, + 0x1030: 0x0040, 0x1031: 0x0040, 0x1032: 0x0040, 0x1033: 0x0040, 0x1034: 0x0040, 0x1035: 0x0040, + 0x1036: 0x0040, 0x1037: 0x0040, 0x1038: 0x0040, 0x1039: 0x0040, 0x103a: 0x0040, 0x103b: 0x0040, + 0x103c: 0x0040, 0x103d: 0x0040, 0x103e: 0x0040, 0x103f: 0x0040, + // Block 0x41, offset 0x1040 + 0x1040: 0x00c0, 0x1041: 0x00c0, 0x1042: 0x00c0, 0x1043: 0x00c0, 0x1044: 0x00c0, 0x1045: 0x00c0, + 0x1046: 0x00c0, 0x1047: 0x00c0, 0x1048: 0x00c0, 0x104a: 0x00c0, 0x104b: 0x00c0, + 0x104c: 0x00c0, 0x104d: 0x00c0, 0x1050: 0x00c0, 0x1051: 0x00c0, + 0x1052: 0x00c0, 0x1053: 0x00c0, 0x1054: 0x00c0, 0x1055: 0x00c0, 0x1056: 0x00c0, + 0x1058: 0x00c0, 0x105a: 0x00c0, 0x105b: 0x00c0, 0x105c: 0x00c0, 0x105d: 0x00c0, + 0x1060: 0x00c0, 0x1061: 0x00c0, 0x1062: 0x00c0, 0x1063: 0x00c0, + 0x1064: 0x00c0, 0x1065: 0x00c0, 0x1066: 0x00c0, 0x1067: 0x00c0, 0x1068: 0x00c0, 0x1069: 0x00c0, + 0x106a: 0x00c0, 0x106b: 0x00c0, 0x106c: 0x00c0, 0x106d: 0x00c0, 0x106e: 0x00c0, 0x106f: 0x00c0, + 0x1070: 0x00c0, 0x1071: 0x00c0, 0x1072: 0x00c0, 0x1073: 0x00c0, 0x1074: 0x00c0, 0x1075: 0x00c0, + 0x1076: 0x00c0, 0x1077: 0x00c0, 0x1078: 0x00c0, 0x1079: 0x00c0, 0x107a: 0x00c0, 0x107b: 0x00c0, + 0x107c: 0x00c0, 0x107d: 0x00c0, 0x107e: 0x00c0, 0x107f: 0x00c0, + // 
Block 0x42, offset 0x1080 + 0x1080: 0x00c0, 0x1081: 0x00c0, 0x1082: 0x00c0, 0x1083: 0x00c0, 0x1084: 0x00c0, 0x1085: 0x00c0, + 0x1086: 0x00c0, 0x1087: 0x00c0, 0x1088: 0x00c0, 0x108a: 0x00c0, 0x108b: 0x00c0, + 0x108c: 0x00c0, 0x108d: 0x00c0, 0x1090: 0x00c0, 0x1091: 0x00c0, + 0x1092: 0x00c0, 0x1093: 0x00c0, 0x1094: 0x00c0, 0x1095: 0x00c0, 0x1096: 0x00c0, 0x1097: 0x00c0, + 0x1098: 0x00c0, 0x1099: 0x00c0, 0x109a: 0x00c0, 0x109b: 0x00c0, 0x109c: 0x00c0, 0x109d: 0x00c0, + 0x109e: 0x00c0, 0x109f: 0x00c0, 0x10a0: 0x00c0, 0x10a1: 0x00c0, 0x10a2: 0x00c0, 0x10a3: 0x00c0, + 0x10a4: 0x00c0, 0x10a5: 0x00c0, 0x10a6: 0x00c0, 0x10a7: 0x00c0, 0x10a8: 0x00c0, 0x10a9: 0x00c0, + 0x10aa: 0x00c0, 0x10ab: 0x00c0, 0x10ac: 0x00c0, 0x10ad: 0x00c0, 0x10ae: 0x00c0, 0x10af: 0x00c0, + 0x10b0: 0x00c0, 0x10b2: 0x00c0, 0x10b3: 0x00c0, 0x10b4: 0x00c0, 0x10b5: 0x00c0, + 0x10b8: 0x00c0, 0x10b9: 0x00c0, 0x10ba: 0x00c0, 0x10bb: 0x00c0, + 0x10bc: 0x00c0, 0x10bd: 0x00c0, 0x10be: 0x00c0, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x00c0, 0x10c2: 0x00c0, 0x10c3: 0x00c0, 0x10c4: 0x00c0, 0x10c5: 0x00c0, + 0x10c8: 0x00c0, 0x10c9: 0x00c0, 0x10ca: 0x00c0, 0x10cb: 0x00c0, + 0x10cc: 0x00c0, 0x10cd: 0x00c0, 0x10ce: 0x00c0, 0x10cf: 0x00c0, 0x10d0: 0x00c0, 0x10d1: 0x00c0, + 0x10d2: 0x00c0, 0x10d3: 0x00c0, 0x10d4: 0x00c0, 0x10d5: 0x00c0, 0x10d6: 0x00c0, + 0x10d8: 0x00c0, 0x10d9: 0x00c0, 0x10da: 0x00c0, 0x10db: 0x00c0, 0x10dc: 0x00c0, 0x10dd: 0x00c0, + 0x10de: 0x00c0, 0x10df: 0x00c0, 0x10e0: 0x00c0, 0x10e1: 0x00c0, 0x10e2: 0x00c0, 0x10e3: 0x00c0, + 0x10e4: 0x00c0, 0x10e5: 0x00c0, 0x10e6: 0x00c0, 0x10e7: 0x00c0, 0x10e8: 0x00c0, 0x10e9: 0x00c0, + 0x10ea: 0x00c0, 0x10eb: 0x00c0, 0x10ec: 0x00c0, 0x10ed: 0x00c0, 0x10ee: 0x00c0, 0x10ef: 0x00c0, + 0x10f0: 0x00c0, 0x10f1: 0x00c0, 0x10f2: 0x00c0, 0x10f3: 0x00c0, 0x10f4: 0x00c0, 0x10f5: 0x00c0, + 0x10f6: 0x00c0, 0x10f7: 0x00c0, 0x10f8: 0x00c0, 0x10f9: 0x00c0, 0x10fa: 0x00c0, 0x10fb: 0x00c0, + 0x10fc: 0x00c0, 0x10fd: 0x00c0, 0x10fe: 0x00c0, 0x10ff: 0x00c0, + // Block 0x44, offset 0x1100 + 0x1100: 0x00c0, 0x1101: 0x00c0, 0x1102: 0x00c0, 0x1103: 0x00c0, 0x1104: 0x00c0, 0x1105: 0x00c0, + 0x1106: 0x00c0, 0x1107: 0x00c0, 0x1108: 0x00c0, 0x1109: 0x00c0, 0x110a: 0x00c0, 0x110b: 0x00c0, + 0x110c: 0x00c0, 0x110d: 0x00c0, 0x110e: 0x00c0, 0x110f: 0x00c0, 0x1110: 0x00c0, + 0x1112: 0x00c0, 0x1113: 0x00c0, 0x1114: 0x00c0, 0x1115: 0x00c0, + 0x1118: 0x00c0, 0x1119: 0x00c0, 0x111a: 0x00c0, 0x111b: 0x00c0, 0x111c: 0x00c0, 0x111d: 0x00c0, + 0x111e: 0x00c0, 0x111f: 0x00c0, 0x1120: 0x00c0, 0x1121: 0x00c0, 0x1122: 0x00c0, 0x1123: 0x00c0, + 0x1124: 0x00c0, 0x1125: 0x00c0, 0x1126: 0x00c0, 0x1127: 0x00c0, 0x1128: 0x00c0, 0x1129: 0x00c0, + 0x112a: 0x00c0, 0x112b: 0x00c0, 0x112c: 0x00c0, 0x112d: 0x00c0, 0x112e: 0x00c0, 0x112f: 0x00c0, + 0x1130: 0x00c0, 0x1131: 0x00c0, 0x1132: 0x00c0, 0x1133: 0x00c0, 0x1134: 0x00c0, 0x1135: 0x00c0, + 0x1136: 0x00c0, 0x1137: 0x00c0, 0x1138: 0x00c0, 0x1139: 0x00c0, 0x113a: 0x00c0, 0x113b: 0x00c0, + 0x113c: 0x00c0, 0x113d: 0x00c0, 0x113e: 0x00c0, 0x113f: 0x00c0, + // Block 0x45, offset 0x1140 + 0x1140: 0x00c0, 0x1141: 0x00c0, 0x1142: 0x00c0, 0x1143: 0x00c0, 0x1144: 0x00c0, 0x1145: 0x00c0, + 0x1146: 0x00c0, 0x1147: 0x00c0, 0x1148: 0x00c0, 0x1149: 0x00c0, 0x114a: 0x00c0, 0x114b: 0x00c0, + 0x114c: 0x00c0, 0x114d: 0x00c0, 0x114e: 0x00c0, 0x114f: 0x00c0, 0x1150: 0x00c0, 0x1151: 0x00c0, + 0x1152: 0x00c0, 0x1153: 0x00c0, 0x1154: 0x00c0, 0x1155: 0x00c0, 0x1156: 0x00c0, 0x1157: 0x00c0, + 0x1158: 0x00c0, 0x1159: 0x00c0, 0x115a: 0x00c0, 0x115d: 0x00c3, + 0x115e: 0x00c3, 0x115f: 0x00c3, 0x1160: 0x0080, 0x1161: 
0x0080, 0x1162: 0x0080, 0x1163: 0x0080, + 0x1164: 0x0080, 0x1165: 0x0080, 0x1166: 0x0080, 0x1167: 0x0080, 0x1168: 0x0080, 0x1169: 0x0080, + 0x116a: 0x0080, 0x116b: 0x0080, 0x116c: 0x0080, 0x116d: 0x0080, 0x116e: 0x0080, 0x116f: 0x0080, + 0x1170: 0x0080, 0x1171: 0x0080, 0x1172: 0x0080, 0x1173: 0x0080, 0x1174: 0x0080, 0x1175: 0x0080, + 0x1176: 0x0080, 0x1177: 0x0080, 0x1178: 0x0080, 0x1179: 0x0080, 0x117a: 0x0080, 0x117b: 0x0080, + 0x117c: 0x0080, + // Block 0x46, offset 0x1180 + 0x1180: 0x00c0, 0x1181: 0x00c0, 0x1182: 0x00c0, 0x1183: 0x00c0, 0x1184: 0x00c0, 0x1185: 0x00c0, + 0x1186: 0x00c0, 0x1187: 0x00c0, 0x1188: 0x00c0, 0x1189: 0x00c0, 0x118a: 0x00c0, 0x118b: 0x00c0, + 0x118c: 0x00c0, 0x118d: 0x00c0, 0x118e: 0x00c0, 0x118f: 0x00c0, 0x1190: 0x0080, 0x1191: 0x0080, + 0x1192: 0x0080, 0x1193: 0x0080, 0x1194: 0x0080, 0x1195: 0x0080, 0x1196: 0x0080, 0x1197: 0x0080, + 0x1198: 0x0080, 0x1199: 0x0080, + 0x11a0: 0x00c0, 0x11a1: 0x00c0, 0x11a2: 0x00c0, 0x11a3: 0x00c0, + 0x11a4: 0x00c0, 0x11a5: 0x00c0, 0x11a6: 0x00c0, 0x11a7: 0x00c0, 0x11a8: 0x00c0, 0x11a9: 0x00c0, + 0x11aa: 0x00c0, 0x11ab: 0x00c0, 0x11ac: 0x00c0, 0x11ad: 0x00c0, 0x11ae: 0x00c0, 0x11af: 0x00c0, + 0x11b0: 0x00c0, 0x11b1: 0x00c0, 0x11b2: 0x00c0, 0x11b3: 0x00c0, 0x11b4: 0x00c0, 0x11b5: 0x00c0, + 0x11b6: 0x00c0, 0x11b7: 0x00c0, 0x11b8: 0x00c0, 0x11b9: 0x00c0, 0x11ba: 0x00c0, 0x11bb: 0x00c0, + 0x11bc: 0x00c0, 0x11bd: 0x00c0, 0x11be: 0x00c0, 0x11bf: 0x00c0, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x00c0, 0x11c1: 0x00c0, 0x11c2: 0x00c0, 0x11c3: 0x00c0, 0x11c4: 0x00c0, 0x11c5: 0x00c0, + 0x11c6: 0x00c0, 0x11c7: 0x00c0, 0x11c8: 0x00c0, 0x11c9: 0x00c0, 0x11ca: 0x00c0, 0x11cb: 0x00c0, + 0x11cc: 0x00c0, 0x11cd: 0x00c0, 0x11ce: 0x00c0, 0x11cf: 0x00c0, 0x11d0: 0x00c0, 0x11d1: 0x00c0, + 0x11d2: 0x00c0, 0x11d3: 0x00c0, 0x11d4: 0x00c0, 0x11d5: 0x00c0, 0x11d6: 0x00c0, 0x11d7: 0x00c0, + 0x11d8: 0x00c0, 0x11d9: 0x00c0, 0x11da: 0x00c0, 0x11db: 0x00c0, 0x11dc: 0x00c0, 0x11dd: 0x00c0, + 0x11de: 0x00c0, 0x11df: 0x00c0, 0x11e0: 0x00c0, 0x11e1: 0x00c0, 0x11e2: 0x00c0, 0x11e3: 0x00c0, + 0x11e4: 0x00c0, 0x11e5: 0x00c0, 0x11e6: 0x00c0, 0x11e7: 0x00c0, 0x11e8: 0x00c0, 0x11e9: 0x00c0, + 0x11ea: 0x00c0, 0x11eb: 0x00c0, 0x11ec: 0x00c0, 0x11ed: 0x00c0, 0x11ee: 0x00c0, 0x11ef: 0x00c0, + 0x11f0: 0x00c0, 0x11f1: 0x00c0, 0x11f2: 0x00c0, 0x11f3: 0x00c0, 0x11f4: 0x00c0, 0x11f5: 0x00c0, + 0x11f8: 0x00c0, 0x11f9: 0x00c0, 0x11fa: 0x00c0, 0x11fb: 0x00c0, + 0x11fc: 0x00c0, 0x11fd: 0x00c0, + // Block 0x48, offset 0x1200 + 0x1200: 0x0080, 0x1201: 0x00c0, 0x1202: 0x00c0, 0x1203: 0x00c0, 0x1204: 0x00c0, 0x1205: 0x00c0, + 0x1206: 0x00c0, 0x1207: 0x00c0, 0x1208: 0x00c0, 0x1209: 0x00c0, 0x120a: 0x00c0, 0x120b: 0x00c0, + 0x120c: 0x00c0, 0x120d: 0x00c0, 0x120e: 0x00c0, 0x120f: 0x00c0, 0x1210: 0x00c0, 0x1211: 0x00c0, + 0x1212: 0x00c0, 0x1213: 0x00c0, 0x1214: 0x00c0, 0x1215: 0x00c0, 0x1216: 0x00c0, 0x1217: 0x00c0, + 0x1218: 0x00c0, 0x1219: 0x00c0, 0x121a: 0x00c0, 0x121b: 0x00c0, 0x121c: 0x00c0, 0x121d: 0x00c0, + 0x121e: 0x00c0, 0x121f: 0x00c0, 0x1220: 0x00c0, 0x1221: 0x00c0, 0x1222: 0x00c0, 0x1223: 0x00c0, + 0x1224: 0x00c0, 0x1225: 0x00c0, 0x1226: 0x00c0, 0x1227: 0x00c0, 0x1228: 0x00c0, 0x1229: 0x00c0, + 0x122a: 0x00c0, 0x122b: 0x00c0, 0x122c: 0x00c0, 0x122d: 0x00c0, 0x122e: 0x00c0, 0x122f: 0x00c0, + 0x1230: 0x00c0, 0x1231: 0x00c0, 0x1232: 0x00c0, 0x1233: 0x00c0, 0x1234: 0x00c0, 0x1235: 0x00c0, + 0x1236: 0x00c0, 0x1237: 0x00c0, 0x1238: 0x00c0, 0x1239: 0x00c0, 0x123a: 0x00c0, 0x123b: 0x00c0, + 0x123c: 0x00c0, 0x123d: 0x00c0, 0x123e: 0x00c0, 0x123f: 0x00c0, + // Block 0x49, offset 0x1240 + 
0x1240: 0x00c0, 0x1241: 0x00c0, 0x1242: 0x00c0, 0x1243: 0x00c0, 0x1244: 0x00c0, 0x1245: 0x00c0, + 0x1246: 0x00c0, 0x1247: 0x00c0, 0x1248: 0x00c0, 0x1249: 0x00c0, 0x124a: 0x00c0, 0x124b: 0x00c0, + 0x124c: 0x00c0, 0x124d: 0x00c0, 0x124e: 0x00c0, 0x124f: 0x00c0, 0x1250: 0x00c0, 0x1251: 0x00c0, + 0x1252: 0x00c0, 0x1253: 0x00c0, 0x1254: 0x00c0, 0x1255: 0x00c0, 0x1256: 0x00c0, 0x1257: 0x00c0, + 0x1258: 0x00c0, 0x1259: 0x00c0, 0x125a: 0x00c0, 0x125b: 0x00c0, 0x125c: 0x00c0, 0x125d: 0x00c0, + 0x125e: 0x00c0, 0x125f: 0x00c0, 0x1260: 0x00c0, 0x1261: 0x00c0, 0x1262: 0x00c0, 0x1263: 0x00c0, + 0x1264: 0x00c0, 0x1265: 0x00c0, 0x1266: 0x00c0, 0x1267: 0x00c0, 0x1268: 0x00c0, 0x1269: 0x00c0, + 0x126a: 0x00c0, 0x126b: 0x00c0, 0x126c: 0x00c0, 0x126d: 0x0080, 0x126e: 0x0080, 0x126f: 0x00c0, + 0x1270: 0x00c0, 0x1271: 0x00c0, 0x1272: 0x00c0, 0x1273: 0x00c0, 0x1274: 0x00c0, 0x1275: 0x00c0, + 0x1276: 0x00c0, 0x1277: 0x00c0, 0x1278: 0x00c0, 0x1279: 0x00c0, 0x127a: 0x00c0, 0x127b: 0x00c0, + 0x127c: 0x00c0, 0x127d: 0x00c0, 0x127e: 0x00c0, 0x127f: 0x00c0, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0080, 0x1281: 0x00c0, 0x1282: 0x00c0, 0x1283: 0x00c0, 0x1284: 0x00c0, 0x1285: 0x00c0, + 0x1286: 0x00c0, 0x1287: 0x00c0, 0x1288: 0x00c0, 0x1289: 0x00c0, 0x128a: 0x00c0, 0x128b: 0x00c0, + 0x128c: 0x00c0, 0x128d: 0x00c0, 0x128e: 0x00c0, 0x128f: 0x00c0, 0x1290: 0x00c0, 0x1291: 0x00c0, + 0x1292: 0x00c0, 0x1293: 0x00c0, 0x1294: 0x00c0, 0x1295: 0x00c0, 0x1296: 0x00c0, 0x1297: 0x00c0, + 0x1298: 0x00c0, 0x1299: 0x00c0, 0x129a: 0x00c0, 0x129b: 0x0080, 0x129c: 0x0080, + 0x12a0: 0x00c0, 0x12a1: 0x00c0, 0x12a2: 0x00c0, 0x12a3: 0x00c0, + 0x12a4: 0x00c0, 0x12a5: 0x00c0, 0x12a6: 0x00c0, 0x12a7: 0x00c0, 0x12a8: 0x00c0, 0x12a9: 0x00c0, + 0x12aa: 0x00c0, 0x12ab: 0x00c0, 0x12ac: 0x00c0, 0x12ad: 0x00c0, 0x12ae: 0x00c0, 0x12af: 0x00c0, + 0x12b0: 0x00c0, 0x12b1: 0x00c0, 0x12b2: 0x00c0, 0x12b3: 0x00c0, 0x12b4: 0x00c0, 0x12b5: 0x00c0, + 0x12b6: 0x00c0, 0x12b7: 0x00c0, 0x12b8: 0x00c0, 0x12b9: 0x00c0, 0x12ba: 0x00c0, 0x12bb: 0x00c0, + 0x12bc: 0x00c0, 0x12bd: 0x00c0, 0x12be: 0x00c0, 0x12bf: 0x00c0, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x00c0, 0x12c1: 0x00c0, 0x12c2: 0x00c0, 0x12c3: 0x00c0, 0x12c4: 0x00c0, 0x12c5: 0x00c0, + 0x12c6: 0x00c0, 0x12c7: 0x00c0, 0x12c8: 0x00c0, 0x12c9: 0x00c0, 0x12ca: 0x00c0, 0x12cb: 0x00c0, + 0x12cc: 0x00c0, 0x12cd: 0x00c0, 0x12ce: 0x00c0, 0x12cf: 0x00c0, 0x12d0: 0x00c0, 0x12d1: 0x00c0, + 0x12d2: 0x00c0, 0x12d3: 0x00c0, 0x12d4: 0x00c0, 0x12d5: 0x00c0, 0x12d6: 0x00c0, 0x12d7: 0x00c0, + 0x12d8: 0x00c0, 0x12d9: 0x00c0, 0x12da: 0x00c0, 0x12db: 0x00c0, 0x12dc: 0x00c0, 0x12dd: 0x00c0, + 0x12de: 0x00c0, 0x12df: 0x00c0, 0x12e0: 0x00c0, 0x12e1: 0x00c0, 0x12e2: 0x00c0, 0x12e3: 0x00c0, + 0x12e4: 0x00c0, 0x12e5: 0x00c0, 0x12e6: 0x00c0, 0x12e7: 0x00c0, 0x12e8: 0x00c0, 0x12e9: 0x00c0, + 0x12ea: 0x00c0, 0x12eb: 0x0080, 0x12ec: 0x0080, 0x12ed: 0x0080, 0x12ee: 0x0080, 0x12ef: 0x0080, + 0x12f0: 0x0080, 0x12f1: 0x00c0, 0x12f2: 0x00c0, 0x12f3: 0x00c0, 0x12f4: 0x00c0, 0x12f5: 0x00c0, + 0x12f6: 0x00c0, 0x12f7: 0x00c0, 0x12f8: 0x00c0, + // Block 0x4c, offset 0x1300 + 0x1300: 0x00c0, 0x1301: 0x00c0, 0x1302: 0x00c0, 0x1303: 0x00c0, 0x1304: 0x00c0, 0x1305: 0x00c0, + 0x1306: 0x00c0, 0x1307: 0x00c0, 0x1308: 0x00c0, 0x1309: 0x00c0, 0x130a: 0x00c0, 0x130b: 0x00c0, + 0x130c: 0x00c0, 0x130e: 0x00c0, 0x130f: 0x00c0, 0x1310: 0x00c0, 0x1311: 0x00c0, + 0x1312: 0x00c3, 0x1313: 0x00c3, 0x1314: 0x00c6, + 0x1320: 0x00c0, 0x1321: 0x00c0, 0x1322: 0x00c0, 0x1323: 0x00c0, + 0x1324: 0x00c0, 0x1325: 0x00c0, 0x1326: 0x00c0, 0x1327: 0x00c0, 0x1328: 0x00c0, 0x1329: 
0x00c0, + 0x132a: 0x00c0, 0x132b: 0x00c0, 0x132c: 0x00c0, 0x132d: 0x00c0, 0x132e: 0x00c0, 0x132f: 0x00c0, + 0x1330: 0x00c0, 0x1331: 0x00c0, 0x1332: 0x00c3, 0x1333: 0x00c3, 0x1334: 0x00c6, 0x1335: 0x0080, + 0x1336: 0x0080, + // Block 0x4d, offset 0x1340 + 0x1340: 0x00c0, 0x1341: 0x00c0, 0x1342: 0x00c0, 0x1343: 0x00c0, 0x1344: 0x00c0, 0x1345: 0x00c0, + 0x1346: 0x00c0, 0x1347: 0x00c0, 0x1348: 0x00c0, 0x1349: 0x00c0, 0x134a: 0x00c0, 0x134b: 0x00c0, + 0x134c: 0x00c0, 0x134d: 0x00c0, 0x134e: 0x00c0, 0x134f: 0x00c0, 0x1350: 0x00c0, 0x1351: 0x00c0, + 0x1352: 0x00c3, 0x1353: 0x00c3, + 0x1360: 0x00c0, 0x1361: 0x00c0, 0x1362: 0x00c0, 0x1363: 0x00c0, + 0x1364: 0x00c0, 0x1365: 0x00c0, 0x1366: 0x00c0, 0x1367: 0x00c0, 0x1368: 0x00c0, 0x1369: 0x00c0, + 0x136a: 0x00c0, 0x136b: 0x00c0, 0x136c: 0x00c0, 0x136e: 0x00c0, 0x136f: 0x00c0, + 0x1370: 0x00c0, 0x1372: 0x00c3, 0x1373: 0x00c3, + // Block 0x4e, offset 0x1380 + 0x1380: 0x00c0, 0x1381: 0x00c0, 0x1382: 0x00c0, 0x1383: 0x00c0, 0x1384: 0x00c0, 0x1385: 0x00c0, + 0x1386: 0x00c0, 0x1387: 0x00c0, 0x1388: 0x00c0, 0x1389: 0x00c0, 0x138a: 0x00c0, 0x138b: 0x00c0, + 0x138c: 0x00c0, 0x138d: 0x00c0, 0x138e: 0x00c0, 0x138f: 0x00c0, 0x1390: 0x00c0, 0x1391: 0x00c0, + 0x1392: 0x00c0, 0x1393: 0x00c0, 0x1394: 0x00c0, 0x1395: 0x00c0, 0x1396: 0x00c0, 0x1397: 0x00c0, + 0x1398: 0x00c0, 0x1399: 0x00c0, 0x139a: 0x00c0, 0x139b: 0x00c0, 0x139c: 0x00c0, 0x139d: 0x00c0, + 0x139e: 0x00c0, 0x139f: 0x00c0, 0x13a0: 0x00c0, 0x13a1: 0x00c0, 0x13a2: 0x00c0, 0x13a3: 0x00c0, + 0x13a4: 0x00c0, 0x13a5: 0x00c0, 0x13a6: 0x00c0, 0x13a7: 0x00c0, 0x13a8: 0x00c0, 0x13a9: 0x00c0, + 0x13aa: 0x00c0, 0x13ab: 0x00c0, 0x13ac: 0x00c0, 0x13ad: 0x00c0, 0x13ae: 0x00c0, 0x13af: 0x00c0, + 0x13b0: 0x00c0, 0x13b1: 0x00c0, 0x13b2: 0x00c0, 0x13b3: 0x00c0, 0x13b4: 0x0040, 0x13b5: 0x0040, + 0x13b6: 0x00c0, 0x13b7: 0x00c3, 0x13b8: 0x00c3, 0x13b9: 0x00c3, 0x13ba: 0x00c3, 0x13bb: 0x00c3, + 0x13bc: 0x00c3, 0x13bd: 0x00c3, 0x13be: 0x00c0, 0x13bf: 0x00c0, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x00c0, 0x13c1: 0x00c0, 0x13c2: 0x00c0, 0x13c3: 0x00c0, 0x13c4: 0x00c0, 0x13c5: 0x00c0, + 0x13c6: 0x00c3, 0x13c7: 0x00c0, 0x13c8: 0x00c0, 0x13c9: 0x00c3, 0x13ca: 0x00c3, 0x13cb: 0x00c3, + 0x13cc: 0x00c3, 0x13cd: 0x00c3, 0x13ce: 0x00c3, 0x13cf: 0x00c3, 0x13d0: 0x00c3, 0x13d1: 0x00c3, + 0x13d2: 0x00c6, 0x13d3: 0x00c3, 0x13d4: 0x0080, 0x13d5: 0x0080, 0x13d6: 0x0080, 0x13d7: 0x00c0, + 0x13d8: 0x0080, 0x13d9: 0x0080, 0x13da: 0x0080, 0x13db: 0x0080, 0x13dc: 0x00c0, 0x13dd: 0x00c3, + 0x13e0: 0x00c0, 0x13e1: 0x00c0, 0x13e2: 0x00c0, 0x13e3: 0x00c0, + 0x13e4: 0x00c0, 0x13e5: 0x00c0, 0x13e6: 0x00c0, 0x13e7: 0x00c0, 0x13e8: 0x00c0, 0x13e9: 0x00c0, + 0x13f0: 0x0080, 0x13f1: 0x0080, 0x13f2: 0x0080, 0x13f3: 0x0080, 0x13f4: 0x0080, 0x13f5: 0x0080, + 0x13f6: 0x0080, 0x13f7: 0x0080, 0x13f8: 0x0080, 0x13f9: 0x0080, + // Block 0x50, offset 0x1400 + 0x1400: 0x0080, 0x1401: 0x0080, 0x1402: 0x0080, 0x1403: 0x0080, 0x1404: 0x0080, 0x1405: 0x0080, + 0x1406: 0x0080, 0x1407: 0x0082, 0x1408: 0x0080, 0x1409: 0x0080, 0x140a: 0x0080, 0x140b: 0x0040, + 0x140c: 0x0040, 0x140d: 0x0040, 0x140e: 0x0040, 0x1410: 0x00c0, 0x1411: 0x00c0, + 0x1412: 0x00c0, 0x1413: 0x00c0, 0x1414: 0x00c0, 0x1415: 0x00c0, 0x1416: 0x00c0, 0x1417: 0x00c0, + 0x1418: 0x00c0, 0x1419: 0x00c0, + 0x1420: 0x00c2, 0x1421: 0x00c2, 0x1422: 0x00c2, 0x1423: 0x00c2, + 0x1424: 0x00c2, 0x1425: 0x00c2, 0x1426: 0x00c2, 0x1427: 0x00c2, 0x1428: 0x00c2, 0x1429: 0x00c2, + 0x142a: 0x00c2, 0x142b: 0x00c2, 0x142c: 0x00c2, 0x142d: 0x00c2, 0x142e: 0x00c2, 0x142f: 0x00c2, + 0x1430: 0x00c2, 0x1431: 0x00c2, 0x1432: 
0x00c2, 0x1433: 0x00c2, 0x1434: 0x00c2, 0x1435: 0x00c2, + 0x1436: 0x00c2, 0x1437: 0x00c2, 0x1438: 0x00c2, 0x1439: 0x00c2, 0x143a: 0x00c2, 0x143b: 0x00c2, + 0x143c: 0x00c2, 0x143d: 0x00c2, 0x143e: 0x00c2, 0x143f: 0x00c2, + // Block 0x51, offset 0x1440 + 0x1440: 0x00c2, 0x1441: 0x00c2, 0x1442: 0x00c2, 0x1443: 0x00c2, 0x1444: 0x00c2, 0x1445: 0x00c2, + 0x1446: 0x00c2, 0x1447: 0x00c2, 0x1448: 0x00c2, 0x1449: 0x00c2, 0x144a: 0x00c2, 0x144b: 0x00c2, + 0x144c: 0x00c2, 0x144d: 0x00c2, 0x144e: 0x00c2, 0x144f: 0x00c2, 0x1450: 0x00c2, 0x1451: 0x00c2, + 0x1452: 0x00c2, 0x1453: 0x00c2, 0x1454: 0x00c2, 0x1455: 0x00c2, 0x1456: 0x00c2, 0x1457: 0x00c2, + 0x1458: 0x00c2, 0x1459: 0x00c2, 0x145a: 0x00c2, 0x145b: 0x00c2, 0x145c: 0x00c2, 0x145d: 0x00c2, + 0x145e: 0x00c2, 0x145f: 0x00c2, 0x1460: 0x00c2, 0x1461: 0x00c2, 0x1462: 0x00c2, 0x1463: 0x00c2, + 0x1464: 0x00c2, 0x1465: 0x00c2, 0x1466: 0x00c2, 0x1467: 0x00c2, 0x1468: 0x00c2, 0x1469: 0x00c2, + 0x146a: 0x00c2, 0x146b: 0x00c2, 0x146c: 0x00c2, 0x146d: 0x00c2, 0x146e: 0x00c2, 0x146f: 0x00c2, + 0x1470: 0x00c2, 0x1471: 0x00c2, 0x1472: 0x00c2, 0x1473: 0x00c2, 0x1474: 0x00c2, 0x1475: 0x00c2, + 0x1476: 0x00c2, 0x1477: 0x00c2, + // Block 0x52, offset 0x1480 + 0x1480: 0x00c0, 0x1481: 0x00c0, 0x1482: 0x00c0, 0x1483: 0x00c0, 0x1484: 0x00c0, 0x1485: 0x00c3, + 0x1486: 0x00c3, 0x1487: 0x00c2, 0x1488: 0x00c2, 0x1489: 0x00c2, 0x148a: 0x00c2, 0x148b: 0x00c2, + 0x148c: 0x00c2, 0x148d: 0x00c2, 0x148e: 0x00c2, 0x148f: 0x00c2, 0x1490: 0x00c2, 0x1491: 0x00c2, + 0x1492: 0x00c2, 0x1493: 0x00c2, 0x1494: 0x00c2, 0x1495: 0x00c2, 0x1496: 0x00c2, 0x1497: 0x00c2, + 0x1498: 0x00c2, 0x1499: 0x00c2, 0x149a: 0x00c2, 0x149b: 0x00c2, 0x149c: 0x00c2, 0x149d: 0x00c2, + 0x149e: 0x00c2, 0x149f: 0x00c2, 0x14a0: 0x00c2, 0x14a1: 0x00c2, 0x14a2: 0x00c2, 0x14a3: 0x00c2, + 0x14a4: 0x00c2, 0x14a5: 0x00c2, 0x14a6: 0x00c2, 0x14a7: 0x00c2, 0x14a8: 0x00c2, 0x14a9: 0x00c3, + 0x14aa: 0x00c2, + 0x14b0: 0x00c0, 0x14b1: 0x00c0, 0x14b2: 0x00c0, 0x14b3: 0x00c0, 0x14b4: 0x00c0, 0x14b5: 0x00c0, + 0x14b6: 0x00c0, 0x14b7: 0x00c0, 0x14b8: 0x00c0, 0x14b9: 0x00c0, 0x14ba: 0x00c0, 0x14bb: 0x00c0, + 0x14bc: 0x00c0, 0x14bd: 0x00c0, 0x14be: 0x00c0, 0x14bf: 0x00c0, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x00c0, 0x14c1: 0x00c0, 0x14c2: 0x00c0, 0x14c3: 0x00c0, 0x14c4: 0x00c0, 0x14c5: 0x00c0, + 0x14c6: 0x00c0, 0x14c7: 0x00c0, 0x14c8: 0x00c0, 0x14c9: 0x00c0, 0x14ca: 0x00c0, 0x14cb: 0x00c0, + 0x14cc: 0x00c0, 0x14cd: 0x00c0, 0x14ce: 0x00c0, 0x14cf: 0x00c0, 0x14d0: 0x00c0, 0x14d1: 0x00c0, + 0x14d2: 0x00c0, 0x14d3: 0x00c0, 0x14d4: 0x00c0, 0x14d5: 0x00c0, 0x14d6: 0x00c0, 0x14d7: 0x00c0, + 0x14d8: 0x00c0, 0x14d9: 0x00c0, 0x14da: 0x00c0, 0x14db: 0x00c0, 0x14dc: 0x00c0, 0x14dd: 0x00c0, + 0x14de: 0x00c0, 0x14df: 0x00c0, 0x14e0: 0x00c0, 0x14e1: 0x00c0, 0x14e2: 0x00c0, 0x14e3: 0x00c0, + 0x14e4: 0x00c0, 0x14e5: 0x00c0, 0x14e6: 0x00c0, 0x14e7: 0x00c0, 0x14e8: 0x00c0, 0x14e9: 0x00c0, + 0x14ea: 0x00c0, 0x14eb: 0x00c0, 0x14ec: 0x00c0, 0x14ed: 0x00c0, 0x14ee: 0x00c0, 0x14ef: 0x00c0, + 0x14f0: 0x00c0, 0x14f1: 0x00c0, 0x14f2: 0x00c0, 0x14f3: 0x00c0, 0x14f4: 0x00c0, 0x14f5: 0x00c0, + // Block 0x54, offset 0x1500 + 0x1500: 0x00c0, 0x1501: 0x00c0, 0x1502: 0x00c0, 0x1503: 0x00c0, 0x1504: 0x00c0, 0x1505: 0x00c0, + 0x1506: 0x00c0, 0x1507: 0x00c0, 0x1508: 0x00c0, 0x1509: 0x00c0, 0x150a: 0x00c0, 0x150b: 0x00c0, + 0x150c: 0x00c0, 0x150d: 0x00c0, 0x150e: 0x00c0, 0x150f: 0x00c0, 0x1510: 0x00c0, 0x1511: 0x00c0, + 0x1512: 0x00c0, 0x1513: 0x00c0, 0x1514: 0x00c0, 0x1515: 0x00c0, 0x1516: 0x00c0, 0x1517: 0x00c0, + 0x1518: 0x00c0, 0x1519: 0x00c0, 0x151a: 0x00c0, 
0x151b: 0x00c0, 0x151c: 0x00c0, 0x151d: 0x00c0, + 0x151e: 0x00c0, 0x1520: 0x00c3, 0x1521: 0x00c3, 0x1522: 0x00c3, 0x1523: 0x00c0, + 0x1524: 0x00c0, 0x1525: 0x00c0, 0x1526: 0x00c0, 0x1527: 0x00c3, 0x1528: 0x00c3, 0x1529: 0x00c0, + 0x152a: 0x00c0, 0x152b: 0x00c0, + 0x1530: 0x00c0, 0x1531: 0x00c0, 0x1532: 0x00c3, 0x1533: 0x00c0, 0x1534: 0x00c0, 0x1535: 0x00c0, + 0x1536: 0x00c0, 0x1537: 0x00c0, 0x1538: 0x00c0, 0x1539: 0x00c3, 0x153a: 0x00c3, 0x153b: 0x00c3, + // Block 0x55, offset 0x1540 + 0x1540: 0x0080, 0x1544: 0x0080, 0x1545: 0x0080, + 0x1546: 0x00c0, 0x1547: 0x00c0, 0x1548: 0x00c0, 0x1549: 0x00c0, 0x154a: 0x00c0, 0x154b: 0x00c0, + 0x154c: 0x00c0, 0x154d: 0x00c0, 0x154e: 0x00c0, 0x154f: 0x00c0, 0x1550: 0x00c0, 0x1551: 0x00c0, + 0x1552: 0x00c0, 0x1553: 0x00c0, 0x1554: 0x00c0, 0x1555: 0x00c0, 0x1556: 0x00c0, 0x1557: 0x00c0, + 0x1558: 0x00c0, 0x1559: 0x00c0, 0x155a: 0x00c0, 0x155b: 0x00c0, 0x155c: 0x00c0, 0x155d: 0x00c0, + 0x155e: 0x00c0, 0x155f: 0x00c0, 0x1560: 0x00c0, 0x1561: 0x00c0, 0x1562: 0x00c0, 0x1563: 0x00c0, + 0x1564: 0x00c0, 0x1565: 0x00c0, 0x1566: 0x00c0, 0x1567: 0x00c0, 0x1568: 0x00c0, 0x1569: 0x00c0, + 0x156a: 0x00c0, 0x156b: 0x00c0, 0x156c: 0x00c0, 0x156d: 0x00c0, + 0x1570: 0x00c0, 0x1571: 0x00c0, 0x1572: 0x00c0, 0x1573: 0x00c0, 0x1574: 0x00c0, + // Block 0x56, offset 0x1580 + 0x1580: 0x00c0, 0x1581: 0x00c0, 0x1582: 0x00c0, 0x1583: 0x00c0, 0x1584: 0x00c0, 0x1585: 0x00c0, + 0x1586: 0x00c0, 0x1587: 0x00c0, 0x1588: 0x00c0, 0x1589: 0x00c0, 0x158a: 0x00c0, 0x158b: 0x00c0, + 0x158c: 0x00c0, 0x158d: 0x00c0, 0x158e: 0x00c0, 0x158f: 0x00c0, 0x1590: 0x00c0, 0x1591: 0x00c0, + 0x1592: 0x00c0, 0x1593: 0x00c0, 0x1594: 0x00c0, 0x1595: 0x00c0, 0x1596: 0x00c0, 0x1597: 0x00c0, + 0x1598: 0x00c0, 0x1599: 0x00c0, 0x159a: 0x00c0, 0x159b: 0x00c0, 0x159c: 0x00c0, 0x159d: 0x00c0, + 0x159e: 0x00c0, 0x159f: 0x00c0, 0x15a0: 0x00c0, 0x15a1: 0x00c0, 0x15a2: 0x00c0, 0x15a3: 0x00c0, + 0x15a4: 0x00c0, 0x15a5: 0x00c0, 0x15a6: 0x00c0, 0x15a7: 0x00c0, 0x15a8: 0x00c0, 0x15a9: 0x00c0, + 0x15aa: 0x00c0, 0x15ab: 0x00c0, + 0x15b0: 0x00c0, 0x15b1: 0x00c0, 0x15b2: 0x00c0, 0x15b3: 0x00c0, 0x15b4: 0x00c0, 0x15b5: 0x00c0, + 0x15b6: 0x00c0, 0x15b7: 0x00c0, 0x15b8: 0x00c0, 0x15b9: 0x00c0, 0x15ba: 0x00c0, 0x15bb: 0x00c0, + 0x15bc: 0x00c0, 0x15bd: 0x00c0, 0x15be: 0x00c0, 0x15bf: 0x00c0, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x00c0, 0x15c1: 0x00c0, 0x15c2: 0x00c0, 0x15c3: 0x00c0, 0x15c4: 0x00c0, 0x15c5: 0x00c0, + 0x15c6: 0x00c0, 0x15c7: 0x00c0, 0x15c8: 0x00c0, 0x15c9: 0x00c0, + 0x15d0: 0x00c0, 0x15d1: 0x00c0, + 0x15d2: 0x00c0, 0x15d3: 0x00c0, 0x15d4: 0x00c0, 0x15d5: 0x00c0, 0x15d6: 0x00c0, 0x15d7: 0x00c0, + 0x15d8: 0x00c0, 0x15d9: 0x00c0, 0x15da: 0x0080, + 0x15de: 0x0080, 0x15df: 0x0080, 0x15e0: 0x0080, 0x15e1: 0x0080, 0x15e2: 0x0080, 0x15e3: 0x0080, + 0x15e4: 0x0080, 0x15e5: 0x0080, 0x15e6: 0x0080, 0x15e7: 0x0080, 0x15e8: 0x0080, 0x15e9: 0x0080, + 0x15ea: 0x0080, 0x15eb: 0x0080, 0x15ec: 0x0080, 0x15ed: 0x0080, 0x15ee: 0x0080, 0x15ef: 0x0080, + 0x15f0: 0x0080, 0x15f1: 0x0080, 0x15f2: 0x0080, 0x15f3: 0x0080, 0x15f4: 0x0080, 0x15f5: 0x0080, + 0x15f6: 0x0080, 0x15f7: 0x0080, 0x15f8: 0x0080, 0x15f9: 0x0080, 0x15fa: 0x0080, 0x15fb: 0x0080, + 0x15fc: 0x0080, 0x15fd: 0x0080, 0x15fe: 0x0080, 0x15ff: 0x0080, + // Block 0x58, offset 0x1600 + 0x1600: 0x00c0, 0x1601: 0x00c0, 0x1602: 0x00c0, 0x1603: 0x00c0, 0x1604: 0x00c0, 0x1605: 0x00c0, + 0x1606: 0x00c0, 0x1607: 0x00c0, 0x1608: 0x00c0, 0x1609: 0x00c0, 0x160a: 0x00c0, 0x160b: 0x00c0, + 0x160c: 0x00c0, 0x160d: 0x00c0, 0x160e: 0x00c0, 0x160f: 0x00c0, 0x1610: 0x00c0, 0x1611: 0x00c0, + 
0x1612: 0x00c0, 0x1613: 0x00c0, 0x1614: 0x00c0, 0x1615: 0x00c0, 0x1616: 0x00c0, 0x1617: 0x00c3, + 0x1618: 0x00c3, 0x1619: 0x00c0, 0x161a: 0x00c0, 0x161b: 0x00c3, + 0x161e: 0x0080, 0x161f: 0x0080, 0x1620: 0x00c0, 0x1621: 0x00c0, 0x1622: 0x00c0, 0x1623: 0x00c0, + 0x1624: 0x00c0, 0x1625: 0x00c0, 0x1626: 0x00c0, 0x1627: 0x00c0, 0x1628: 0x00c0, 0x1629: 0x00c0, + 0x162a: 0x00c0, 0x162b: 0x00c0, 0x162c: 0x00c0, 0x162d: 0x00c0, 0x162e: 0x00c0, 0x162f: 0x00c0, + 0x1630: 0x00c0, 0x1631: 0x00c0, 0x1632: 0x00c0, 0x1633: 0x00c0, 0x1634: 0x00c0, 0x1635: 0x00c0, + 0x1636: 0x00c0, 0x1637: 0x00c0, 0x1638: 0x00c0, 0x1639: 0x00c0, 0x163a: 0x00c0, 0x163b: 0x00c0, + 0x163c: 0x00c0, 0x163d: 0x00c0, 0x163e: 0x00c0, 0x163f: 0x00c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x00c0, 0x1641: 0x00c0, 0x1642: 0x00c0, 0x1643: 0x00c0, 0x1644: 0x00c0, 0x1645: 0x00c0, + 0x1646: 0x00c0, 0x1647: 0x00c0, 0x1648: 0x00c0, 0x1649: 0x00c0, 0x164a: 0x00c0, 0x164b: 0x00c0, + 0x164c: 0x00c0, 0x164d: 0x00c0, 0x164e: 0x00c0, 0x164f: 0x00c0, 0x1650: 0x00c0, 0x1651: 0x00c0, + 0x1652: 0x00c0, 0x1653: 0x00c0, 0x1654: 0x00c0, 0x1655: 0x00c0, 0x1656: 0x00c3, 0x1657: 0x00c0, + 0x1658: 0x00c3, 0x1659: 0x00c3, 0x165a: 0x00c3, 0x165b: 0x00c3, 0x165c: 0x00c3, 0x165d: 0x00c3, + 0x165e: 0x00c3, 0x1660: 0x00c6, 0x1661: 0x00c0, 0x1662: 0x00c3, 0x1663: 0x00c0, + 0x1664: 0x00c0, 0x1665: 0x00c3, 0x1666: 0x00c3, 0x1667: 0x00c3, 0x1668: 0x00c3, 0x1669: 0x00c3, + 0x166a: 0x00c3, 0x166b: 0x00c3, 0x166c: 0x00c3, 0x166d: 0x00c0, 0x166e: 0x00c0, 0x166f: 0x00c0, + 0x1670: 0x00c0, 0x1671: 0x00c0, 0x1672: 0x00c0, 0x1673: 0x00c3, 0x1674: 0x00c3, 0x1675: 0x00c3, + 0x1676: 0x00c3, 0x1677: 0x00c3, 0x1678: 0x00c3, 0x1679: 0x00c3, 0x167a: 0x00c3, 0x167b: 0x00c3, + 0x167c: 0x00c3, 0x167f: 0x00c3, + // Block 0x5a, offset 0x1680 + 0x1680: 0x00c0, 0x1681: 0x00c0, 0x1682: 0x00c0, 0x1683: 0x00c0, 0x1684: 0x00c0, 0x1685: 0x00c0, + 0x1686: 0x00c0, 0x1687: 0x00c0, 0x1688: 0x00c0, 0x1689: 0x00c0, + 0x1690: 0x00c0, 0x1691: 0x00c0, + 0x1692: 0x00c0, 0x1693: 0x00c0, 0x1694: 0x00c0, 0x1695: 0x00c0, 0x1696: 0x00c0, 0x1697: 0x00c0, + 0x1698: 0x00c0, 0x1699: 0x00c0, + 0x16a0: 0x0080, 0x16a1: 0x0080, 0x16a2: 0x0080, 0x16a3: 0x0080, + 0x16a4: 0x0080, 0x16a5: 0x0080, 0x16a6: 0x0080, 0x16a7: 0x00c0, 0x16a8: 0x0080, 0x16a9: 0x0080, + 0x16aa: 0x0080, 0x16ab: 0x0080, 0x16ac: 0x0080, 0x16ad: 0x0080, + 0x16b0: 0x00c3, 0x16b1: 0x00c3, 0x16b2: 0x00c3, 0x16b3: 0x00c3, 0x16b4: 0x00c3, 0x16b5: 0x00c3, + 0x16b6: 0x00c3, 0x16b7: 0x00c3, 0x16b8: 0x00c3, 0x16b9: 0x00c3, 0x16ba: 0x00c3, 0x16bb: 0x00c3, + 0x16bc: 0x00c3, 0x16bd: 0x00c3, 0x16be: 0x0083, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x00c3, 0x16c1: 0x00c3, 0x16c2: 0x00c3, 0x16c3: 0x00c3, 0x16c4: 0x00c0, 0x16c5: 0x00c0, + 0x16c6: 0x00c0, 0x16c7: 0x00c0, 0x16c8: 0x00c0, 0x16c9: 0x00c0, 0x16ca: 0x00c0, 0x16cb: 0x00c0, + 0x16cc: 0x00c0, 0x16cd: 0x00c0, 0x16ce: 0x00c0, 0x16cf: 0x00c0, 0x16d0: 0x00c0, 0x16d1: 0x00c0, + 0x16d2: 0x00c0, 0x16d3: 0x00c0, 0x16d4: 0x00c0, 0x16d5: 0x00c0, 0x16d6: 0x00c0, 0x16d7: 0x00c0, + 0x16d8: 0x00c0, 0x16d9: 0x00c0, 0x16da: 0x00c0, 0x16db: 0x00c0, 0x16dc: 0x00c0, 0x16dd: 0x00c0, + 0x16de: 0x00c0, 0x16df: 0x00c0, 0x16e0: 0x00c0, 0x16e1: 0x00c0, 0x16e2: 0x00c0, 0x16e3: 0x00c0, + 0x16e4: 0x00c0, 0x16e5: 0x00c0, 0x16e6: 0x00c0, 0x16e7: 0x00c0, 0x16e8: 0x00c0, 0x16e9: 0x00c0, + 0x16ea: 0x00c0, 0x16eb: 0x00c0, 0x16ec: 0x00c0, 0x16ed: 0x00c0, 0x16ee: 0x00c0, 0x16ef: 0x00c0, + 0x16f0: 0x00c0, 0x16f1: 0x00c0, 0x16f2: 0x00c0, 0x16f3: 0x00c0, 0x16f4: 0x00c3, 0x16f5: 0x00c0, + 0x16f6: 0x00c3, 0x16f7: 0x00c3, 0x16f8: 0x00c3, 0x16f9: 
0x00c3, 0x16fa: 0x00c3, 0x16fb: 0x00c0, + 0x16fc: 0x00c3, 0x16fd: 0x00c0, 0x16fe: 0x00c0, 0x16ff: 0x00c0, + // Block 0x5c, offset 0x1700 + 0x1700: 0x00c0, 0x1701: 0x00c0, 0x1702: 0x00c3, 0x1703: 0x00c0, 0x1704: 0x00c5, 0x1705: 0x00c0, + 0x1706: 0x00c0, 0x1707: 0x00c0, 0x1708: 0x00c0, 0x1709: 0x00c0, 0x170a: 0x00c0, 0x170b: 0x00c0, + 0x1710: 0x00c0, 0x1711: 0x00c0, + 0x1712: 0x00c0, 0x1713: 0x00c0, 0x1714: 0x00c0, 0x1715: 0x00c0, 0x1716: 0x00c0, 0x1717: 0x00c0, + 0x1718: 0x00c0, 0x1719: 0x00c0, 0x171a: 0x0080, 0x171b: 0x0080, 0x171c: 0x0080, 0x171d: 0x0080, + 0x171e: 0x0080, 0x171f: 0x0080, 0x1720: 0x0080, 0x1721: 0x0080, 0x1722: 0x0080, 0x1723: 0x0080, + 0x1724: 0x0080, 0x1725: 0x0080, 0x1726: 0x0080, 0x1727: 0x0080, 0x1728: 0x0080, 0x1729: 0x0080, + 0x172a: 0x0080, 0x172b: 0x00c3, 0x172c: 0x00c3, 0x172d: 0x00c3, 0x172e: 0x00c3, 0x172f: 0x00c3, + 0x1730: 0x00c3, 0x1731: 0x00c3, 0x1732: 0x00c3, 0x1733: 0x00c3, 0x1734: 0x0080, 0x1735: 0x0080, + 0x1736: 0x0080, 0x1737: 0x0080, 0x1738: 0x0080, 0x1739: 0x0080, 0x173a: 0x0080, 0x173b: 0x0080, + 0x173c: 0x0080, + // Block 0x5d, offset 0x1740 + 0x1740: 0x00c3, 0x1741: 0x00c3, 0x1742: 0x00c0, 0x1743: 0x00c0, 0x1744: 0x00c0, 0x1745: 0x00c0, + 0x1746: 0x00c0, 0x1747: 0x00c0, 0x1748: 0x00c0, 0x1749: 0x00c0, 0x174a: 0x00c0, 0x174b: 0x00c0, + 0x174c: 0x00c0, 0x174d: 0x00c0, 0x174e: 0x00c0, 0x174f: 0x00c0, 0x1750: 0x00c0, 0x1751: 0x00c0, + 0x1752: 0x00c0, 0x1753: 0x00c0, 0x1754: 0x00c0, 0x1755: 0x00c0, 0x1756: 0x00c0, 0x1757: 0x00c0, + 0x1758: 0x00c0, 0x1759: 0x00c0, 0x175a: 0x00c0, 0x175b: 0x00c0, 0x175c: 0x00c0, 0x175d: 0x00c0, + 0x175e: 0x00c0, 0x175f: 0x00c0, 0x1760: 0x00c0, 0x1761: 0x00c0, 0x1762: 0x00c3, 0x1763: 0x00c3, + 0x1764: 0x00c3, 0x1765: 0x00c3, 0x1766: 0x00c0, 0x1767: 0x00c0, 0x1768: 0x00c3, 0x1769: 0x00c3, + 0x176a: 0x00c5, 0x176b: 0x00c6, 0x176c: 0x00c3, 0x176d: 0x00c3, 0x176e: 0x00c0, 0x176f: 0x00c0, + 0x1770: 0x00c0, 0x1771: 0x00c0, 0x1772: 0x00c0, 0x1773: 0x00c0, 0x1774: 0x00c0, 0x1775: 0x00c0, + 0x1776: 0x00c0, 0x1777: 0x00c0, 0x1778: 0x00c0, 0x1779: 0x00c0, 0x177a: 0x00c0, 0x177b: 0x00c0, + 0x177c: 0x00c0, 0x177d: 0x00c0, 0x177e: 0x00c0, 0x177f: 0x00c0, + // Block 0x5e, offset 0x1780 + 0x1780: 0x00c0, 0x1781: 0x00c0, 0x1782: 0x00c0, 0x1783: 0x00c0, 0x1784: 0x00c0, 0x1785: 0x00c0, + 0x1786: 0x00c0, 0x1787: 0x00c0, 0x1788: 0x00c0, 0x1789: 0x00c0, 0x178a: 0x00c0, 0x178b: 0x00c0, + 0x178c: 0x00c0, 0x178d: 0x00c0, 0x178e: 0x00c0, 0x178f: 0x00c0, 0x1790: 0x00c0, 0x1791: 0x00c0, + 0x1792: 0x00c0, 0x1793: 0x00c0, 0x1794: 0x00c0, 0x1795: 0x00c0, 0x1796: 0x00c0, 0x1797: 0x00c0, + 0x1798: 0x00c0, 0x1799: 0x00c0, 0x179a: 0x00c0, 0x179b: 0x00c0, 0x179c: 0x00c0, 0x179d: 0x00c0, + 0x179e: 0x00c0, 0x179f: 0x00c0, 0x17a0: 0x00c0, 0x17a1: 0x00c0, 0x17a2: 0x00c0, 0x17a3: 0x00c0, + 0x17a4: 0x00c0, 0x17a5: 0x00c0, 0x17a6: 0x00c3, 0x17a7: 0x00c0, 0x17a8: 0x00c3, 0x17a9: 0x00c3, + 0x17aa: 0x00c0, 0x17ab: 0x00c0, 0x17ac: 0x00c0, 0x17ad: 0x00c3, 0x17ae: 0x00c0, 0x17af: 0x00c3, + 0x17b0: 0x00c3, 0x17b1: 0x00c3, 0x17b2: 0x00c5, 0x17b3: 0x00c5, + 0x17bc: 0x0080, 0x17bd: 0x0080, 0x17be: 0x0080, 0x17bf: 0x0080, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x00c0, 0x17c1: 0x00c0, 0x17c2: 0x00c0, 0x17c3: 0x00c0, 0x17c4: 0x00c0, 0x17c5: 0x00c0, + 0x17c6: 0x00c0, 0x17c7: 0x00c0, 0x17c8: 0x00c0, 0x17c9: 0x00c0, 0x17ca: 0x00c0, 0x17cb: 0x00c0, + 0x17cc: 0x00c0, 0x17cd: 0x00c0, 0x17ce: 0x00c0, 0x17cf: 0x00c0, 0x17d0: 0x00c0, 0x17d1: 0x00c0, + 0x17d2: 0x00c0, 0x17d3: 0x00c0, 0x17d4: 0x00c0, 0x17d5: 0x00c0, 0x17d6: 0x00c0, 0x17d7: 0x00c0, + 0x17d8: 0x00c0, 0x17d9: 0x00c0, 
0x17da: 0x00c0, 0x17db: 0x00c0, 0x17dc: 0x00c0, 0x17dd: 0x00c0, + 0x17de: 0x00c0, 0x17df: 0x00c0, 0x17e0: 0x00c0, 0x17e1: 0x00c0, 0x17e2: 0x00c0, 0x17e3: 0x00c0, + 0x17e4: 0x00c0, 0x17e5: 0x00c0, 0x17e6: 0x00c0, 0x17e7: 0x00c0, 0x17e8: 0x00c0, 0x17e9: 0x00c0, + 0x17ea: 0x00c0, 0x17eb: 0x00c0, 0x17ec: 0x00c3, 0x17ed: 0x00c3, 0x17ee: 0x00c3, 0x17ef: 0x00c3, + 0x17f0: 0x00c3, 0x17f1: 0x00c3, 0x17f2: 0x00c3, 0x17f3: 0x00c3, 0x17f4: 0x00c0, 0x17f5: 0x00c0, + 0x17f6: 0x00c3, 0x17f7: 0x00c3, 0x17fb: 0x0080, + 0x17fc: 0x0080, 0x17fd: 0x0080, 0x17fe: 0x0080, 0x17ff: 0x0080, + // Block 0x60, offset 0x1800 + 0x1800: 0x00c0, 0x1801: 0x00c0, 0x1802: 0x00c0, 0x1803: 0x00c0, 0x1804: 0x00c0, 0x1805: 0x00c0, + 0x1806: 0x00c0, 0x1807: 0x00c0, 0x1808: 0x00c0, 0x1809: 0x00c0, + 0x180d: 0x00c0, 0x180e: 0x00c0, 0x180f: 0x00c0, 0x1810: 0x00c0, 0x1811: 0x00c0, + 0x1812: 0x00c0, 0x1813: 0x00c0, 0x1814: 0x00c0, 0x1815: 0x00c0, 0x1816: 0x00c0, 0x1817: 0x00c0, + 0x1818: 0x00c0, 0x1819: 0x00c0, 0x181a: 0x00c0, 0x181b: 0x00c0, 0x181c: 0x00c0, 0x181d: 0x00c0, + 0x181e: 0x00c0, 0x181f: 0x00c0, 0x1820: 0x00c0, 0x1821: 0x00c0, 0x1822: 0x00c0, 0x1823: 0x00c0, + 0x1824: 0x00c0, 0x1825: 0x00c0, 0x1826: 0x00c0, 0x1827: 0x00c0, 0x1828: 0x00c0, 0x1829: 0x00c0, + 0x182a: 0x00c0, 0x182b: 0x00c0, 0x182c: 0x00c0, 0x182d: 0x00c0, 0x182e: 0x00c0, 0x182f: 0x00c0, + 0x1830: 0x00c0, 0x1831: 0x00c0, 0x1832: 0x00c0, 0x1833: 0x00c0, 0x1834: 0x00c0, 0x1835: 0x00c0, + 0x1836: 0x00c0, 0x1837: 0x00c0, 0x1838: 0x00c0, 0x1839: 0x00c0, 0x183a: 0x00c0, 0x183b: 0x00c0, + 0x183c: 0x00c0, 0x183d: 0x00c0, 0x183e: 0x0080, 0x183f: 0x0080, + // Block 0x61, offset 0x1840 + 0x1840: 0x00c0, 0x1841: 0x00c0, 0x1842: 0x00c0, 0x1843: 0x00c0, 0x1844: 0x00c0, 0x1845: 0x00c0, + 0x1846: 0x00c0, 0x1847: 0x00c0, 0x1848: 0x00c0, + // Block 0x62, offset 0x1880 + 0x1880: 0x0080, 0x1881: 0x0080, 0x1882: 0x0080, 0x1883: 0x0080, 0x1884: 0x0080, 0x1885: 0x0080, + 0x1886: 0x0080, 0x1887: 0x0080, + 0x1890: 0x00c3, 0x1891: 0x00c3, + 0x1892: 0x00c3, 0x1893: 0x0080, 0x1894: 0x00c3, 0x1895: 0x00c3, 0x1896: 0x00c3, 0x1897: 0x00c3, + 0x1898: 0x00c3, 0x1899: 0x00c3, 0x189a: 0x00c3, 0x189b: 0x00c3, 0x189c: 0x00c3, 0x189d: 0x00c3, + 0x189e: 0x00c3, 0x189f: 0x00c3, 0x18a0: 0x00c3, 0x18a1: 0x00c0, 0x18a2: 0x00c3, 0x18a3: 0x00c3, + 0x18a4: 0x00c3, 0x18a5: 0x00c3, 0x18a6: 0x00c3, 0x18a7: 0x00c3, 0x18a8: 0x00c3, 0x18a9: 0x00c0, + 0x18aa: 0x00c0, 0x18ab: 0x00c0, 0x18ac: 0x00c0, 0x18ad: 0x00c3, 0x18ae: 0x00c0, 0x18af: 0x00c0, + 0x18b0: 0x00c0, 0x18b1: 0x00c0, 0x18b2: 0x00c0, 0x18b3: 0x00c0, 0x18b4: 0x00c3, 0x18b5: 0x00c0, + 0x18b6: 0x00c0, 0x18b8: 0x00c3, 0x18b9: 0x00c3, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x00c0, 0x18c1: 0x00c0, 0x18c2: 0x00c0, 0x18c3: 0x00c0, 0x18c4: 0x00c0, 0x18c5: 0x00c0, + 0x18c6: 0x00c0, 0x18c7: 0x00c0, 0x18c8: 0x00c0, 0x18c9: 0x00c0, 0x18ca: 0x00c0, 0x18cb: 0x00c0, + 0x18cc: 0x00c0, 0x18cd: 0x00c0, 0x18ce: 0x00c0, 0x18cf: 0x00c0, 0x18d0: 0x00c0, 0x18d1: 0x00c0, + 0x18d2: 0x00c0, 0x18d3: 0x00c0, 0x18d4: 0x00c0, 0x18d5: 0x00c0, 0x18d6: 0x00c0, 0x18d7: 0x00c0, + 0x18d8: 0x00c0, 0x18d9: 0x00c0, 0x18da: 0x00c0, 0x18db: 0x00c0, 0x18dc: 0x00c0, 0x18dd: 0x00c0, + 0x18de: 0x00c0, 0x18df: 0x00c0, 0x18e0: 0x00c0, 0x18e1: 0x00c0, 0x18e2: 0x00c0, 0x18e3: 0x00c0, + 0x18e4: 0x00c0, 0x18e5: 0x00c0, 0x18e6: 0x00c8, 0x18e7: 0x00c8, 0x18e8: 0x00c8, 0x18e9: 0x00c8, + 0x18ea: 0x00c8, 0x18eb: 0x00c0, 0x18ec: 0x0080, 0x18ed: 0x0080, 0x18ee: 0x0080, 0x18ef: 0x00c0, + 0x18f0: 0x0080, 0x18f1: 0x0080, 0x18f2: 0x0080, 0x18f3: 0x0080, 0x18f4: 0x0080, 0x18f5: 0x0080, + 0x18f6: 0x0080, 0x18f7: 
0x0080, 0x18f8: 0x0080, 0x18f9: 0x0080, 0x18fa: 0x0080, 0x18fb: 0x00c0, + 0x18fc: 0x0080, 0x18fd: 0x0080, 0x18fe: 0x0080, 0x18ff: 0x0080, + // Block 0x64, offset 0x1900 + 0x1900: 0x0080, 0x1901: 0x0080, 0x1902: 0x0080, 0x1903: 0x0080, 0x1904: 0x0080, 0x1905: 0x0080, + 0x1906: 0x0080, 0x1907: 0x0080, 0x1908: 0x0080, 0x1909: 0x0080, 0x190a: 0x0080, 0x190b: 0x0080, + 0x190c: 0x0080, 0x190d: 0x0080, 0x190e: 0x00c0, 0x190f: 0x0080, 0x1910: 0x0080, 0x1911: 0x0080, + 0x1912: 0x0080, 0x1913: 0x0080, 0x1914: 0x0080, 0x1915: 0x0080, 0x1916: 0x0080, 0x1917: 0x0080, + 0x1918: 0x0080, 0x1919: 0x0080, 0x191a: 0x0080, 0x191b: 0x0080, 0x191c: 0x0080, 0x191d: 0x0088, + 0x191e: 0x0088, 0x191f: 0x0088, 0x1920: 0x0088, 0x1921: 0x0088, 0x1922: 0x0080, 0x1923: 0x0080, + 0x1924: 0x0080, 0x1925: 0x0080, 0x1926: 0x0088, 0x1927: 0x0088, 0x1928: 0x0088, 0x1929: 0x0088, + 0x192a: 0x0088, 0x192b: 0x00c0, 0x192c: 0x00c0, 0x192d: 0x00c0, 0x192e: 0x00c0, 0x192f: 0x00c0, + 0x1930: 0x00c0, 0x1931: 0x00c0, 0x1932: 0x00c0, 0x1933: 0x00c0, 0x1934: 0x00c0, 0x1935: 0x00c0, + 0x1936: 0x00c0, 0x1937: 0x00c0, 0x1938: 0x0080, 0x1939: 0x00c0, 0x193a: 0x00c0, 0x193b: 0x00c0, + 0x193c: 0x00c0, 0x193d: 0x00c0, 0x193e: 0x00c0, 0x193f: 0x00c0, + // Block 0x65, offset 0x1940 + 0x1940: 0x00c0, 0x1941: 0x00c0, 0x1942: 0x00c0, 0x1943: 0x00c0, 0x1944: 0x00c0, 0x1945: 0x00c0, + 0x1946: 0x00c0, 0x1947: 0x00c0, 0x1948: 0x00c0, 0x1949: 0x00c0, 0x194a: 0x00c0, 0x194b: 0x00c0, + 0x194c: 0x00c0, 0x194d: 0x00c0, 0x194e: 0x00c0, 0x194f: 0x00c0, 0x1950: 0x00c0, 0x1951: 0x00c0, + 0x1952: 0x00c0, 0x1953: 0x00c0, 0x1954: 0x00c0, 0x1955: 0x00c0, 0x1956: 0x00c0, 0x1957: 0x00c0, + 0x1958: 0x00c0, 0x1959: 0x00c0, 0x195a: 0x00c0, 0x195b: 0x0080, 0x195c: 0x0080, 0x195d: 0x0080, + 0x195e: 0x0080, 0x195f: 0x0080, 0x1960: 0x0080, 0x1961: 0x0080, 0x1962: 0x0080, 0x1963: 0x0080, + 0x1964: 0x0080, 0x1965: 0x0080, 0x1966: 0x0080, 0x1967: 0x0080, 0x1968: 0x0080, 0x1969: 0x0080, + 0x196a: 0x0080, 0x196b: 0x0080, 0x196c: 0x0080, 0x196d: 0x0080, 0x196e: 0x0080, 0x196f: 0x0080, + 0x1970: 0x0080, 0x1971: 0x0080, 0x1972: 0x0080, 0x1973: 0x0080, 0x1974: 0x0080, 0x1975: 0x0080, + 0x1976: 0x0080, 0x1977: 0x0080, 0x1978: 0x0080, 0x1979: 0x0080, 0x197a: 0x0080, 0x197b: 0x0080, + 0x197c: 0x0080, 0x197d: 0x0080, 0x197e: 0x0080, 0x197f: 0x0088, + // Block 0x66, offset 0x1980 + 0x1980: 0x00c3, 0x1981: 0x00c3, 0x1982: 0x00c3, 0x1983: 0x00c3, 0x1984: 0x00c3, 0x1985: 0x00c3, + 0x1986: 0x00c3, 0x1987: 0x00c3, 0x1988: 0x00c3, 0x1989: 0x00c3, 0x198a: 0x00c3, 0x198b: 0x00c3, + 0x198c: 0x00c3, 0x198d: 0x00c3, 0x198e: 0x00c3, 0x198f: 0x00c3, 0x1990: 0x00c3, 0x1991: 0x00c3, + 0x1992: 0x00c3, 0x1993: 0x00c3, 0x1994: 0x00c3, 0x1995: 0x00c3, 0x1996: 0x00c3, 0x1997: 0x00c3, + 0x1998: 0x00c3, 0x1999: 0x00c3, 0x199a: 0x00c3, 0x199b: 0x00c3, 0x199c: 0x00c3, 0x199d: 0x00c3, + 0x199e: 0x00c3, 0x199f: 0x00c3, 0x19a0: 0x00c3, 0x19a1: 0x00c3, 0x19a2: 0x00c3, 0x19a3: 0x00c3, + 0x19a4: 0x00c3, 0x19a5: 0x00c3, 0x19a6: 0x00c3, 0x19a7: 0x00c3, 0x19a8: 0x00c3, 0x19a9: 0x00c3, + 0x19aa: 0x00c3, 0x19ab: 0x00c3, 0x19ac: 0x00c3, 0x19ad: 0x00c3, 0x19ae: 0x00c3, 0x19af: 0x00c3, + 0x19b0: 0x00c3, 0x19b1: 0x00c3, 0x19b2: 0x00c3, 0x19b3: 0x00c3, 0x19b4: 0x00c3, 0x19b5: 0x00c3, + 0x19bb: 0x00c3, + 0x19bc: 0x00c3, 0x19bd: 0x00c3, 0x19be: 0x00c3, 0x19bf: 0x00c3, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x00c0, 0x19c1: 0x00c0, 0x19c2: 0x00c0, 0x19c3: 0x00c0, 0x19c4: 0x00c0, 0x19c5: 0x00c0, + 0x19c6: 0x00c0, 0x19c7: 0x00c0, 0x19c8: 0x00c0, 0x19c9: 0x00c0, 0x19ca: 0x00c0, 0x19cb: 0x00c0, + 0x19cc: 0x00c0, 0x19cd: 0x00c0, 
0x19ce: 0x00c0, 0x19cf: 0x00c0, 0x19d0: 0x00c0, 0x19d1: 0x00c0, + 0x19d2: 0x00c0, 0x19d3: 0x00c0, 0x19d4: 0x00c0, 0x19d5: 0x00c0, 0x19d6: 0x00c0, 0x19d7: 0x00c0, + 0x19d8: 0x00c0, 0x19d9: 0x00c0, 0x19da: 0x0080, 0x19db: 0x0080, 0x19dc: 0x00c0, 0x19dd: 0x00c0, + 0x19de: 0x00c0, 0x19df: 0x00c0, 0x19e0: 0x00c0, 0x19e1: 0x00c0, 0x19e2: 0x00c0, 0x19e3: 0x00c0, + 0x19e4: 0x00c0, 0x19e5: 0x00c0, 0x19e6: 0x00c0, 0x19e7: 0x00c0, 0x19e8: 0x00c0, 0x19e9: 0x00c0, + 0x19ea: 0x00c0, 0x19eb: 0x00c0, 0x19ec: 0x00c0, 0x19ed: 0x00c0, 0x19ee: 0x00c0, 0x19ef: 0x00c0, + 0x19f0: 0x00c0, 0x19f1: 0x00c0, 0x19f2: 0x00c0, 0x19f3: 0x00c0, 0x19f4: 0x00c0, 0x19f5: 0x00c0, + 0x19f6: 0x00c0, 0x19f7: 0x00c0, 0x19f8: 0x00c0, 0x19f9: 0x00c0, 0x19fa: 0x00c0, 0x19fb: 0x00c0, + 0x19fc: 0x00c0, 0x19fd: 0x00c0, 0x19fe: 0x00c0, 0x19ff: 0x00c0, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x00c8, 0x1a01: 0x00c8, 0x1a02: 0x00c8, 0x1a03: 0x00c8, 0x1a04: 0x00c8, 0x1a05: 0x00c8, + 0x1a06: 0x00c8, 0x1a07: 0x00c8, 0x1a08: 0x00c8, 0x1a09: 0x00c8, 0x1a0a: 0x00c8, 0x1a0b: 0x00c8, + 0x1a0c: 0x00c8, 0x1a0d: 0x00c8, 0x1a0e: 0x00c8, 0x1a0f: 0x00c8, 0x1a10: 0x00c8, 0x1a11: 0x00c8, + 0x1a12: 0x00c8, 0x1a13: 0x00c8, 0x1a14: 0x00c8, 0x1a15: 0x00c8, + 0x1a18: 0x00c8, 0x1a19: 0x00c8, 0x1a1a: 0x00c8, 0x1a1b: 0x00c8, 0x1a1c: 0x00c8, 0x1a1d: 0x00c8, + 0x1a20: 0x00c8, 0x1a21: 0x00c8, 0x1a22: 0x00c8, 0x1a23: 0x00c8, + 0x1a24: 0x00c8, 0x1a25: 0x00c8, 0x1a26: 0x00c8, 0x1a27: 0x00c8, 0x1a28: 0x00c8, 0x1a29: 0x00c8, + 0x1a2a: 0x00c8, 0x1a2b: 0x00c8, 0x1a2c: 0x00c8, 0x1a2d: 0x00c8, 0x1a2e: 0x00c8, 0x1a2f: 0x00c8, + 0x1a30: 0x00c8, 0x1a31: 0x00c8, 0x1a32: 0x00c8, 0x1a33: 0x00c8, 0x1a34: 0x00c8, 0x1a35: 0x00c8, + 0x1a36: 0x00c8, 0x1a37: 0x00c8, 0x1a38: 0x00c8, 0x1a39: 0x00c8, 0x1a3a: 0x00c8, 0x1a3b: 0x00c8, + 0x1a3c: 0x00c8, 0x1a3d: 0x00c8, 0x1a3e: 0x00c8, 0x1a3f: 0x00c8, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x00c8, 0x1a41: 0x00c8, 0x1a42: 0x00c8, 0x1a43: 0x00c8, 0x1a44: 0x00c8, 0x1a45: 0x00c8, + 0x1a48: 0x00c8, 0x1a49: 0x00c8, 0x1a4a: 0x00c8, 0x1a4b: 0x00c8, + 0x1a4c: 0x00c8, 0x1a4d: 0x00c8, 0x1a50: 0x00c8, 0x1a51: 0x00c8, + 0x1a52: 0x00c8, 0x1a53: 0x00c8, 0x1a54: 0x00c8, 0x1a55: 0x00c8, 0x1a56: 0x00c8, 0x1a57: 0x00c8, + 0x1a59: 0x00c8, 0x1a5b: 0x00c8, 0x1a5d: 0x00c8, + 0x1a5f: 0x00c8, 0x1a60: 0x00c8, 0x1a61: 0x00c8, 0x1a62: 0x00c8, 0x1a63: 0x00c8, + 0x1a64: 0x00c8, 0x1a65: 0x00c8, 0x1a66: 0x00c8, 0x1a67: 0x00c8, 0x1a68: 0x00c8, 0x1a69: 0x00c8, + 0x1a6a: 0x00c8, 0x1a6b: 0x00c8, 0x1a6c: 0x00c8, 0x1a6d: 0x00c8, 0x1a6e: 0x00c8, 0x1a6f: 0x00c8, + 0x1a70: 0x00c8, 0x1a71: 0x0088, 0x1a72: 0x00c8, 0x1a73: 0x0088, 0x1a74: 0x00c8, 0x1a75: 0x0088, + 0x1a76: 0x00c8, 0x1a77: 0x0088, 0x1a78: 0x00c8, 0x1a79: 0x0088, 0x1a7a: 0x00c8, 0x1a7b: 0x0088, + 0x1a7c: 0x00c8, 0x1a7d: 0x0088, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x00c8, 0x1a81: 0x00c8, 0x1a82: 0x00c8, 0x1a83: 0x00c8, 0x1a84: 0x00c8, 0x1a85: 0x00c8, + 0x1a86: 0x00c8, 0x1a87: 0x00c8, 0x1a88: 0x0088, 0x1a89: 0x0088, 0x1a8a: 0x0088, 0x1a8b: 0x0088, + 0x1a8c: 0x0088, 0x1a8d: 0x0088, 0x1a8e: 0x0088, 0x1a8f: 0x0088, 0x1a90: 0x00c8, 0x1a91: 0x00c8, + 0x1a92: 0x00c8, 0x1a93: 0x00c8, 0x1a94: 0x00c8, 0x1a95: 0x00c8, 0x1a96: 0x00c8, 0x1a97: 0x00c8, + 0x1a98: 0x0088, 0x1a99: 0x0088, 0x1a9a: 0x0088, 0x1a9b: 0x0088, 0x1a9c: 0x0088, 0x1a9d: 0x0088, + 0x1a9e: 0x0088, 0x1a9f: 0x0088, 0x1aa0: 0x00c8, 0x1aa1: 0x00c8, 0x1aa2: 0x00c8, 0x1aa3: 0x00c8, + 0x1aa4: 0x00c8, 0x1aa5: 0x00c8, 0x1aa6: 0x00c8, 0x1aa7: 0x00c8, 0x1aa8: 0x0088, 0x1aa9: 0x0088, + 0x1aaa: 0x0088, 0x1aab: 0x0088, 0x1aac: 0x0088, 0x1aad: 0x0088, 0x1aae: 0x0088, 0x1aaf: 
0x0088, + 0x1ab0: 0x00c8, 0x1ab1: 0x00c8, 0x1ab2: 0x00c8, 0x1ab3: 0x00c8, 0x1ab4: 0x00c8, + 0x1ab6: 0x00c8, 0x1ab7: 0x00c8, 0x1ab8: 0x00c8, 0x1ab9: 0x00c8, 0x1aba: 0x00c8, 0x1abb: 0x0088, + 0x1abc: 0x0088, 0x1abd: 0x0088, 0x1abe: 0x0088, 0x1abf: 0x0088, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x0088, 0x1ac1: 0x0088, 0x1ac2: 0x00c8, 0x1ac3: 0x00c8, 0x1ac4: 0x00c8, + 0x1ac6: 0x00c8, 0x1ac7: 0x00c8, 0x1ac8: 0x00c8, 0x1ac9: 0x0088, 0x1aca: 0x00c8, 0x1acb: 0x0088, + 0x1acc: 0x0088, 0x1acd: 0x0088, 0x1ace: 0x0088, 0x1acf: 0x0088, 0x1ad0: 0x00c8, 0x1ad1: 0x00c8, + 0x1ad2: 0x00c8, 0x1ad3: 0x0088, 0x1ad6: 0x00c8, 0x1ad7: 0x00c8, + 0x1ad8: 0x00c8, 0x1ad9: 0x00c8, 0x1ada: 0x00c8, 0x1adb: 0x0088, 0x1add: 0x0088, + 0x1ade: 0x0088, 0x1adf: 0x0088, 0x1ae0: 0x00c8, 0x1ae1: 0x00c8, 0x1ae2: 0x00c8, 0x1ae3: 0x0088, + 0x1ae4: 0x00c8, 0x1ae5: 0x00c8, 0x1ae6: 0x00c8, 0x1ae7: 0x00c8, 0x1ae8: 0x00c8, 0x1ae9: 0x00c8, + 0x1aea: 0x00c8, 0x1aeb: 0x0088, 0x1aec: 0x00c8, 0x1aed: 0x0088, 0x1aee: 0x0088, 0x1aef: 0x0088, + 0x1af2: 0x00c8, 0x1af3: 0x00c8, 0x1af4: 0x00c8, + 0x1af6: 0x00c8, 0x1af7: 0x00c8, 0x1af8: 0x00c8, 0x1af9: 0x0088, 0x1afa: 0x00c8, 0x1afb: 0x0088, + 0x1afc: 0x0088, 0x1afd: 0x0088, 0x1afe: 0x0088, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x0080, 0x1b01: 0x0080, 0x1b02: 0x0080, 0x1b03: 0x0080, 0x1b04: 0x0080, 0x1b05: 0x0080, + 0x1b06: 0x0080, 0x1b07: 0x0080, 0x1b08: 0x0080, 0x1b09: 0x0080, 0x1b0a: 0x0080, 0x1b0b: 0x0040, + 0x1b0c: 0x004d, 0x1b0d: 0x004e, 0x1b0e: 0x0040, 0x1b0f: 0x0040, 0x1b10: 0x0080, 0x1b11: 0x0080, + 0x1b12: 0x0080, 0x1b13: 0x0080, 0x1b14: 0x0080, 0x1b15: 0x0080, 0x1b16: 0x0080, 0x1b17: 0x0080, + 0x1b18: 0x0080, 0x1b19: 0x0080, 0x1b1a: 0x0080, 0x1b1b: 0x0080, 0x1b1c: 0x0080, 0x1b1d: 0x0080, + 0x1b1e: 0x0080, 0x1b1f: 0x0080, 0x1b20: 0x0080, 0x1b21: 0x0080, 0x1b22: 0x0080, 0x1b23: 0x0080, + 0x1b24: 0x0080, 0x1b25: 0x0080, 0x1b26: 0x0080, 0x1b27: 0x0080, 0x1b28: 0x0040, 0x1b29: 0x0040, + 0x1b2a: 0x0040, 0x1b2b: 0x0040, 0x1b2c: 0x0040, 0x1b2d: 0x0040, 0x1b2e: 0x0040, 0x1b2f: 0x0080, + 0x1b30: 0x0080, 0x1b31: 0x0080, 0x1b32: 0x0080, 0x1b33: 0x0080, 0x1b34: 0x0080, 0x1b35: 0x0080, + 0x1b36: 0x0080, 0x1b37: 0x0080, 0x1b38: 0x0080, 0x1b39: 0x0080, 0x1b3a: 0x0080, 0x1b3b: 0x0080, + 0x1b3c: 0x0080, 0x1b3d: 0x0080, 0x1b3e: 0x0080, 0x1b3f: 0x0080, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x0080, 0x1b41: 0x0080, 0x1b42: 0x0080, 0x1b43: 0x0080, 0x1b44: 0x0080, 0x1b45: 0x0080, + 0x1b46: 0x0080, 0x1b47: 0x0080, 0x1b48: 0x0080, 0x1b49: 0x0080, 0x1b4a: 0x0080, 0x1b4b: 0x0080, + 0x1b4c: 0x0080, 0x1b4d: 0x0080, 0x1b4e: 0x0080, 0x1b4f: 0x0080, 0x1b50: 0x0080, 0x1b51: 0x0080, + 0x1b52: 0x0080, 0x1b53: 0x0080, 0x1b54: 0x0080, 0x1b55: 0x0080, 0x1b56: 0x0080, 0x1b57: 0x0080, + 0x1b58: 0x0080, 0x1b59: 0x0080, 0x1b5a: 0x0080, 0x1b5b: 0x0080, 0x1b5c: 0x0080, 0x1b5d: 0x0080, + 0x1b5e: 0x0080, 0x1b5f: 0x0080, 0x1b60: 0x0040, 0x1b61: 0x0040, 0x1b62: 0x0040, 0x1b63: 0x0040, + 0x1b64: 0x0040, 0x1b66: 0x0040, 0x1b67: 0x0040, 0x1b68: 0x0040, 0x1b69: 0x0040, + 0x1b6a: 0x0040, 0x1b6b: 0x0040, 0x1b6c: 0x0040, 0x1b6d: 0x0040, 0x1b6e: 0x0040, 0x1b6f: 0x0040, + 0x1b70: 0x0080, 0x1b71: 0x0080, 0x1b74: 0x0080, 0x1b75: 0x0080, + 0x1b76: 0x0080, 0x1b77: 0x0080, 0x1b78: 0x0080, 0x1b79: 0x0080, 0x1b7a: 0x0080, 0x1b7b: 0x0080, + 0x1b7c: 0x0080, 0x1b7d: 0x0080, 0x1b7e: 0x0080, 0x1b7f: 0x0080, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x0080, 0x1b81: 0x0080, 0x1b82: 0x0080, 0x1b83: 0x0080, 0x1b84: 0x0080, 0x1b85: 0x0080, + 0x1b86: 0x0080, 0x1b87: 0x0080, 0x1b88: 0x0080, 0x1b89: 0x0080, 0x1b8a: 0x0080, 0x1b8b: 0x0080, + 0x1b8c: 0x0080, 
0x1b8d: 0x0080, 0x1b8e: 0x0080, 0x1b90: 0x0080, 0x1b91: 0x0080, + 0x1b92: 0x0080, 0x1b93: 0x0080, 0x1b94: 0x0080, 0x1b95: 0x0080, 0x1b96: 0x0080, 0x1b97: 0x0080, + 0x1b98: 0x0080, 0x1b99: 0x0080, 0x1b9a: 0x0080, 0x1b9b: 0x0080, 0x1b9c: 0x0080, + 0x1ba0: 0x0080, 0x1ba1: 0x0080, 0x1ba2: 0x0080, 0x1ba3: 0x0080, + 0x1ba4: 0x0080, 0x1ba5: 0x0080, 0x1ba6: 0x0080, 0x1ba7: 0x0080, 0x1ba8: 0x0080, 0x1ba9: 0x0080, + 0x1baa: 0x0080, 0x1bab: 0x0080, 0x1bac: 0x0080, 0x1bad: 0x0080, 0x1bae: 0x0080, 0x1baf: 0x0080, + 0x1bb0: 0x0080, 0x1bb1: 0x0080, 0x1bb2: 0x0080, 0x1bb3: 0x0080, 0x1bb4: 0x0080, 0x1bb5: 0x0080, + 0x1bb6: 0x0080, 0x1bb7: 0x0080, 0x1bb8: 0x0080, 0x1bb9: 0x0080, 0x1bba: 0x0080, 0x1bbb: 0x0080, + 0x1bbc: 0x0080, 0x1bbd: 0x0080, 0x1bbe: 0x0080, + // Block 0x6f, offset 0x1bc0 + 0x1bd0: 0x00c3, 0x1bd1: 0x00c3, + 0x1bd2: 0x00c3, 0x1bd3: 0x00c3, 0x1bd4: 0x00c3, 0x1bd5: 0x00c3, 0x1bd6: 0x00c3, 0x1bd7: 0x00c3, + 0x1bd8: 0x00c3, 0x1bd9: 0x00c3, 0x1bda: 0x00c3, 0x1bdb: 0x00c3, 0x1bdc: 0x00c3, 0x1bdd: 0x0083, + 0x1bde: 0x0083, 0x1bdf: 0x0083, 0x1be0: 0x0083, 0x1be1: 0x00c3, 0x1be2: 0x0083, 0x1be3: 0x0083, + 0x1be4: 0x0083, 0x1be5: 0x00c3, 0x1be6: 0x00c3, 0x1be7: 0x00c3, 0x1be8: 0x00c3, 0x1be9: 0x00c3, + 0x1bea: 0x00c3, 0x1beb: 0x00c3, 0x1bec: 0x00c3, 0x1bed: 0x00c3, 0x1bee: 0x00c3, 0x1bef: 0x00c3, + 0x1bf0: 0x00c3, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x0080, 0x1c01: 0x0080, 0x1c02: 0x0080, 0x1c03: 0x0080, 0x1c04: 0x0080, 0x1c05: 0x0080, + 0x1c06: 0x0080, 0x1c07: 0x0080, 0x1c08: 0x0080, 0x1c09: 0x0080, 0x1c0a: 0x0080, 0x1c0b: 0x0080, + 0x1c0c: 0x0080, 0x1c0d: 0x0080, 0x1c0e: 0x0080, 0x1c0f: 0x0080, 0x1c10: 0x0080, 0x1c11: 0x0080, + 0x1c12: 0x0080, 0x1c13: 0x0080, 0x1c14: 0x0080, 0x1c15: 0x0080, 0x1c16: 0x0080, 0x1c17: 0x0080, + 0x1c18: 0x0080, 0x1c19: 0x0080, 0x1c1a: 0x0080, 0x1c1b: 0x0080, 0x1c1c: 0x0080, 0x1c1d: 0x0080, + 0x1c1e: 0x0080, 0x1c1f: 0x0080, 0x1c20: 0x0080, 0x1c21: 0x0080, 0x1c22: 0x0080, 0x1c23: 0x0080, + 0x1c24: 0x0080, 0x1c25: 0x0080, 0x1c26: 0x0088, 0x1c27: 0x0080, 0x1c28: 0x0080, 0x1c29: 0x0080, + 0x1c2a: 0x0080, 0x1c2b: 0x0080, 0x1c2c: 0x0080, 0x1c2d: 0x0080, 0x1c2e: 0x0080, 0x1c2f: 0x0080, + 0x1c30: 0x0080, 0x1c31: 0x0080, 0x1c32: 0x00c0, 0x1c33: 0x0080, 0x1c34: 0x0080, 0x1c35: 0x0080, + 0x1c36: 0x0080, 0x1c37: 0x0080, 0x1c38: 0x0080, 0x1c39: 0x0080, 0x1c3a: 0x0080, 0x1c3b: 0x0080, + 0x1c3c: 0x0080, 0x1c3d: 0x0080, 0x1c3e: 0x0080, 0x1c3f: 0x0080, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x0080, 0x1c41: 0x0080, 0x1c42: 0x0080, 0x1c43: 0x0080, 0x1c44: 0x0080, 0x1c45: 0x0080, + 0x1c46: 0x0080, 0x1c47: 0x0080, 0x1c48: 0x0080, 0x1c49: 0x0080, 0x1c4a: 0x0080, 0x1c4b: 0x0080, + 0x1c4c: 0x0080, 0x1c4d: 0x0080, 0x1c4e: 0x00c0, 0x1c4f: 0x0080, 0x1c50: 0x0080, 0x1c51: 0x0080, + 0x1c52: 0x0080, 0x1c53: 0x0080, 0x1c54: 0x0080, 0x1c55: 0x0080, 0x1c56: 0x0080, 0x1c57: 0x0080, + 0x1c58: 0x0080, 0x1c59: 0x0080, 0x1c5a: 0x0080, 0x1c5b: 0x0080, 0x1c5c: 0x0080, 0x1c5d: 0x0080, + 0x1c5e: 0x0080, 0x1c5f: 0x0080, 0x1c60: 0x0080, 0x1c61: 0x0080, 0x1c62: 0x0080, 0x1c63: 0x0080, + 0x1c64: 0x0080, 0x1c65: 0x0080, 0x1c66: 0x0080, 0x1c67: 0x0080, 0x1c68: 0x0080, 0x1c69: 0x0080, + 0x1c6a: 0x0080, 0x1c6b: 0x0080, 0x1c6c: 0x0080, 0x1c6d: 0x0080, 0x1c6e: 0x0080, 0x1c6f: 0x0080, + 0x1c70: 0x0080, 0x1c71: 0x0080, 0x1c72: 0x0080, 0x1c73: 0x0080, 0x1c74: 0x0080, 0x1c75: 0x0080, + 0x1c76: 0x0080, 0x1c77: 0x0080, 0x1c78: 0x0080, 0x1c79: 0x0080, 0x1c7a: 0x0080, 0x1c7b: 0x0080, + 0x1c7c: 0x0080, 0x1c7d: 0x0080, 0x1c7e: 0x0080, 0x1c7f: 0x0080, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x0080, 0x1c81: 0x0080, 0x1c82: 
0x0080, 0x1c83: 0x00c0, 0x1c84: 0x00c0, 0x1c85: 0x0080, + 0x1c86: 0x0080, 0x1c87: 0x0080, 0x1c88: 0x0080, 0x1c89: 0x0080, 0x1c8a: 0x0080, 0x1c8b: 0x0080, + 0x1c90: 0x0080, 0x1c91: 0x0080, + 0x1c92: 0x0080, 0x1c93: 0x0080, 0x1c94: 0x0080, 0x1c95: 0x0080, 0x1c96: 0x0080, 0x1c97: 0x0080, + 0x1c98: 0x0080, 0x1c99: 0x0080, 0x1c9a: 0x0080, 0x1c9b: 0x0080, 0x1c9c: 0x0080, 0x1c9d: 0x0080, + 0x1c9e: 0x0080, 0x1c9f: 0x0080, 0x1ca0: 0x0080, 0x1ca1: 0x0080, 0x1ca2: 0x0080, 0x1ca3: 0x0080, + 0x1ca4: 0x0080, 0x1ca5: 0x0080, 0x1ca6: 0x0080, 0x1ca7: 0x0080, 0x1ca8: 0x0080, 0x1ca9: 0x0080, + 0x1caa: 0x0080, 0x1cab: 0x0080, 0x1cac: 0x0080, 0x1cad: 0x0080, 0x1cae: 0x0080, 0x1caf: 0x0080, + 0x1cb0: 0x0080, 0x1cb1: 0x0080, 0x1cb2: 0x0080, 0x1cb3: 0x0080, 0x1cb4: 0x0080, 0x1cb5: 0x0080, + 0x1cb6: 0x0080, 0x1cb7: 0x0080, 0x1cb8: 0x0080, 0x1cb9: 0x0080, 0x1cba: 0x0080, 0x1cbb: 0x0080, + 0x1cbc: 0x0080, 0x1cbd: 0x0080, 0x1cbe: 0x0080, 0x1cbf: 0x0080, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0080, 0x1cc1: 0x0080, 0x1cc2: 0x0080, 0x1cc3: 0x0080, 0x1cc4: 0x0080, 0x1cc5: 0x0080, + 0x1cc6: 0x0080, 0x1cc7: 0x0080, 0x1cc8: 0x0080, 0x1cc9: 0x0080, 0x1cca: 0x0080, 0x1ccb: 0x0080, + 0x1ccc: 0x0080, 0x1ccd: 0x0080, 0x1cce: 0x0080, 0x1ccf: 0x0080, 0x1cd0: 0x0080, 0x1cd1: 0x0080, + 0x1cd2: 0x0080, 0x1cd3: 0x0080, 0x1cd4: 0x0080, 0x1cd5: 0x0080, 0x1cd6: 0x0080, 0x1cd7: 0x0080, + 0x1cd8: 0x0080, 0x1cd9: 0x0080, 0x1cda: 0x0080, 0x1cdb: 0x0080, 0x1cdc: 0x0080, 0x1cdd: 0x0080, + 0x1cde: 0x0080, 0x1cdf: 0x0080, 0x1ce0: 0x0080, 0x1ce1: 0x0080, 0x1ce2: 0x0080, 0x1ce3: 0x0080, + 0x1ce4: 0x0080, 0x1ce5: 0x0080, 0x1ce6: 0x0080, 0x1ce7: 0x0080, 0x1ce8: 0x0080, 0x1ce9: 0x0080, + 0x1cea: 0x0080, 0x1ceb: 0x0080, 0x1cec: 0x0080, 0x1ced: 0x0080, 0x1cee: 0x0080, 0x1cef: 0x0080, + 0x1cf0: 0x0080, 0x1cf1: 0x0080, 0x1cf2: 0x0080, 0x1cf3: 0x0080, 0x1cf4: 0x0080, 0x1cf5: 0x0080, + 0x1cf6: 0x0080, 0x1cf7: 0x0080, 0x1cf8: 0x0080, 0x1cf9: 0x0080, 0x1cfa: 0x0080, 0x1cfb: 0x0080, + 0x1cfc: 0x0080, 0x1cfd: 0x0080, 0x1cfe: 0x0080, 0x1cff: 0x0080, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x0080, 0x1d01: 0x0080, 0x1d02: 0x0080, 0x1d03: 0x0080, 0x1d04: 0x0080, 0x1d05: 0x0080, + 0x1d06: 0x0080, 0x1d07: 0x0080, 0x1d08: 0x0080, 0x1d09: 0x0080, 0x1d0a: 0x0080, 0x1d0b: 0x0080, + 0x1d0c: 0x0080, 0x1d0d: 0x0080, 0x1d0e: 0x0080, 0x1d0f: 0x0080, 0x1d10: 0x0080, 0x1d11: 0x0080, + 0x1d12: 0x0080, 0x1d13: 0x0080, 0x1d14: 0x0080, 0x1d15: 0x0080, 0x1d16: 0x0080, 0x1d17: 0x0080, + 0x1d18: 0x0080, 0x1d19: 0x0080, 0x1d1a: 0x0080, 0x1d1b: 0x0080, 0x1d1c: 0x0080, 0x1d1d: 0x0080, + 0x1d1e: 0x0080, 0x1d1f: 0x0080, 0x1d20: 0x0080, 0x1d21: 0x0080, 0x1d22: 0x0080, 0x1d23: 0x0080, + 0x1d24: 0x0080, 0x1d25: 0x0080, 0x1d26: 0x0080, 0x1d27: 0x0080, 0x1d28: 0x0080, 0x1d29: 0x0080, + 0x1d2a: 0x0080, 0x1d2b: 0x0080, 0x1d2c: 0x0080, 0x1d2d: 0x0080, 0x1d2e: 0x0080, 0x1d2f: 0x0080, + 0x1d30: 0x0080, 0x1d31: 0x0080, 0x1d32: 0x0080, 0x1d33: 0x0080, 0x1d34: 0x0080, 0x1d35: 0x0080, + 0x1d36: 0x0080, 0x1d37: 0x0080, 0x1d38: 0x0080, 0x1d39: 0x0080, 0x1d3a: 0x0080, 0x1d3b: 0x0080, + 0x1d3c: 0x0080, 0x1d3d: 0x0080, 0x1d3e: 0x0080, + // Block 0x75, offset 0x1d40 + 0x1d40: 0x0080, 0x1d41: 0x0080, 0x1d42: 0x0080, 0x1d43: 0x0080, 0x1d44: 0x0080, 0x1d45: 0x0080, + 0x1d46: 0x0080, 0x1d47: 0x0080, 0x1d48: 0x0080, 0x1d49: 0x0080, 0x1d4a: 0x0080, 0x1d4b: 0x0080, + 0x1d4c: 0x0080, 0x1d4d: 0x0080, 0x1d4e: 0x0080, 0x1d4f: 0x0080, 0x1d50: 0x0080, 0x1d51: 0x0080, + 0x1d52: 0x0080, 0x1d53: 0x0080, 0x1d54: 0x0080, 0x1d55: 0x0080, 0x1d56: 0x0080, 0x1d57: 0x0080, + 0x1d58: 0x0080, 0x1d59: 0x0080, 0x1d5a: 0x0080, 
0x1d5b: 0x0080, 0x1d5c: 0x0080, 0x1d5d: 0x0080, + 0x1d5e: 0x0080, 0x1d5f: 0x0080, 0x1d60: 0x0080, 0x1d61: 0x0080, 0x1d62: 0x0080, 0x1d63: 0x0080, + 0x1d64: 0x0080, 0x1d65: 0x0080, 0x1d66: 0x0080, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x0080, 0x1d81: 0x0080, 0x1d82: 0x0080, 0x1d83: 0x0080, 0x1d84: 0x0080, 0x1d85: 0x0080, + 0x1d86: 0x0080, 0x1d87: 0x0080, 0x1d88: 0x0080, 0x1d89: 0x0080, 0x1d8a: 0x0080, + 0x1da0: 0x0080, 0x1da1: 0x0080, 0x1da2: 0x0080, 0x1da3: 0x0080, + 0x1da4: 0x0080, 0x1da5: 0x0080, 0x1da6: 0x0080, 0x1da7: 0x0080, 0x1da8: 0x0080, 0x1da9: 0x0080, + 0x1daa: 0x0080, 0x1dab: 0x0080, 0x1dac: 0x0080, 0x1dad: 0x0080, 0x1dae: 0x0080, 0x1daf: 0x0080, + 0x1db0: 0x0080, 0x1db1: 0x0080, 0x1db2: 0x0080, 0x1db3: 0x0080, 0x1db4: 0x0080, 0x1db5: 0x0080, + 0x1db6: 0x0080, 0x1db7: 0x0080, 0x1db8: 0x0080, 0x1db9: 0x0080, 0x1dba: 0x0080, 0x1dbb: 0x0080, + 0x1dbc: 0x0080, 0x1dbd: 0x0080, 0x1dbe: 0x0080, 0x1dbf: 0x0080, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0x0080, 0x1dc1: 0x0080, 0x1dc2: 0x0080, 0x1dc3: 0x0080, 0x1dc4: 0x0080, 0x1dc5: 0x0080, + 0x1dc6: 0x0080, 0x1dc7: 0x0080, 0x1dc8: 0x0080, 0x1dc9: 0x0080, 0x1dca: 0x0080, 0x1dcb: 0x0080, + 0x1dcc: 0x0080, 0x1dcd: 0x0080, 0x1dce: 0x0080, 0x1dcf: 0x0080, 0x1dd0: 0x0080, 0x1dd1: 0x0080, + 0x1dd2: 0x0080, 0x1dd3: 0x0080, 0x1dd4: 0x0080, 0x1dd5: 0x0080, 0x1dd6: 0x0080, 0x1dd7: 0x0080, + 0x1dd8: 0x0080, 0x1dd9: 0x0080, 0x1dda: 0x0080, 0x1ddb: 0x0080, 0x1ddc: 0x0080, 0x1ddd: 0x0080, + 0x1dde: 0x0080, 0x1ddf: 0x0080, 0x1de0: 0x0080, 0x1de1: 0x0080, 0x1de2: 0x0080, 0x1de3: 0x0080, + 0x1de4: 0x0080, 0x1de5: 0x0080, 0x1de6: 0x0080, 0x1de7: 0x0080, 0x1de8: 0x0080, 0x1de9: 0x0080, + 0x1dea: 0x0080, 0x1deb: 0x0080, 0x1dec: 0x0080, 0x1ded: 0x0080, 0x1dee: 0x0080, 0x1def: 0x0080, + 0x1df0: 0x0080, 0x1df1: 0x0080, 0x1df2: 0x0080, 0x1df3: 0x0080, + 0x1df6: 0x0080, 0x1df7: 0x0080, 0x1df8: 0x0080, 0x1df9: 0x0080, 0x1dfa: 0x0080, 0x1dfb: 0x0080, + 0x1dfc: 0x0080, 0x1dfd: 0x0080, 0x1dfe: 0x0080, 0x1dff: 0x0080, + // Block 0x78, offset 0x1e00 + 0x1e00: 0x0080, 0x1e01: 0x0080, 0x1e02: 0x0080, 0x1e03: 0x0080, 0x1e04: 0x0080, 0x1e05: 0x0080, + 0x1e06: 0x0080, 0x1e07: 0x0080, 0x1e08: 0x0080, 0x1e09: 0x0080, 0x1e0a: 0x0080, 0x1e0b: 0x0080, + 0x1e0c: 0x0080, 0x1e0d: 0x0080, 0x1e0e: 0x0080, 0x1e0f: 0x0080, 0x1e10: 0x0080, 0x1e11: 0x0080, + 0x1e12: 0x0080, 0x1e13: 0x0080, 0x1e14: 0x0080, 0x1e15: 0x0080, + 0x1e18: 0x0080, 0x1e19: 0x0080, 0x1e1a: 0x0080, 0x1e1b: 0x0080, 0x1e1c: 0x0080, 0x1e1d: 0x0080, + 0x1e1e: 0x0080, 0x1e1f: 0x0080, 0x1e20: 0x0080, 0x1e21: 0x0080, 0x1e22: 0x0080, 0x1e23: 0x0080, + 0x1e24: 0x0080, 0x1e25: 0x0080, 0x1e26: 0x0080, 0x1e27: 0x0080, 0x1e28: 0x0080, 0x1e29: 0x0080, + 0x1e2a: 0x0080, 0x1e2b: 0x0080, 0x1e2c: 0x0080, 0x1e2d: 0x0080, 0x1e2e: 0x0080, 0x1e2f: 0x0080, + 0x1e30: 0x0080, 0x1e31: 0x0080, 0x1e32: 0x0080, 0x1e33: 0x0080, 0x1e34: 0x0080, 0x1e35: 0x0080, + 0x1e36: 0x0080, 0x1e37: 0x0080, 0x1e38: 0x0080, 0x1e39: 0x0080, + 0x1e3d: 0x0080, 0x1e3e: 0x0080, 0x1e3f: 0x0080, + // Block 0x79, offset 0x1e40 + 0x1e40: 0x0080, 0x1e41: 0x0080, 0x1e42: 0x0080, 0x1e43: 0x0080, 0x1e44: 0x0080, 0x1e45: 0x0080, + 0x1e46: 0x0080, 0x1e47: 0x0080, 0x1e48: 0x0080, 0x1e4a: 0x0080, 0x1e4b: 0x0080, + 0x1e4c: 0x0080, 0x1e4d: 0x0080, 0x1e4e: 0x0080, 0x1e4f: 0x0080, 0x1e50: 0x0080, 0x1e51: 0x0080, + 0x1e6c: 0x0080, 0x1e6d: 0x0080, 0x1e6e: 0x0080, 0x1e6f: 0x0080, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x00c0, 0x1e81: 0x00c0, 0x1e82: 0x00c0, 0x1e83: 0x00c0, 0x1e84: 0x00c0, 0x1e85: 0x00c0, + 0x1e86: 0x00c0, 0x1e87: 0x00c0, 0x1e88: 0x00c0, 0x1e89: 0x00c0, 0x1e8a: 
0x00c0, 0x1e8b: 0x00c0, + 0x1e8c: 0x00c0, 0x1e8d: 0x00c0, 0x1e8e: 0x00c0, 0x1e8f: 0x00c0, 0x1e90: 0x00c0, 0x1e91: 0x00c0, + 0x1e92: 0x00c0, 0x1e93: 0x00c0, 0x1e94: 0x00c0, 0x1e95: 0x00c0, 0x1e96: 0x00c0, 0x1e97: 0x00c0, + 0x1e98: 0x00c0, 0x1e99: 0x00c0, 0x1e9a: 0x00c0, 0x1e9b: 0x00c0, 0x1e9c: 0x00c0, 0x1e9d: 0x00c0, + 0x1e9e: 0x00c0, 0x1e9f: 0x00c0, 0x1ea0: 0x00c0, 0x1ea1: 0x00c0, 0x1ea2: 0x00c0, 0x1ea3: 0x00c0, + 0x1ea4: 0x00c0, 0x1ea5: 0x00c0, 0x1ea6: 0x00c0, 0x1ea7: 0x00c0, 0x1ea8: 0x00c0, 0x1ea9: 0x00c0, + 0x1eaa: 0x00c0, 0x1eab: 0x00c0, 0x1eac: 0x00c0, 0x1ead: 0x00c0, 0x1eae: 0x00c0, + 0x1eb0: 0x00c0, 0x1eb1: 0x00c0, 0x1eb2: 0x00c0, 0x1eb3: 0x00c0, 0x1eb4: 0x00c0, 0x1eb5: 0x00c0, + 0x1eb6: 0x00c0, 0x1eb7: 0x00c0, 0x1eb8: 0x00c0, 0x1eb9: 0x00c0, 0x1eba: 0x00c0, 0x1ebb: 0x00c0, + 0x1ebc: 0x00c0, 0x1ebd: 0x00c0, 0x1ebe: 0x00c0, 0x1ebf: 0x00c0, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0x00c0, 0x1ec1: 0x00c0, 0x1ec2: 0x00c0, 0x1ec3: 0x00c0, 0x1ec4: 0x00c0, 0x1ec5: 0x00c0, + 0x1ec6: 0x00c0, 0x1ec7: 0x00c0, 0x1ec8: 0x00c0, 0x1ec9: 0x00c0, 0x1eca: 0x00c0, 0x1ecb: 0x00c0, + 0x1ecc: 0x00c0, 0x1ecd: 0x00c0, 0x1ece: 0x00c0, 0x1ecf: 0x00c0, 0x1ed0: 0x00c0, 0x1ed1: 0x00c0, + 0x1ed2: 0x00c0, 0x1ed3: 0x00c0, 0x1ed4: 0x00c0, 0x1ed5: 0x00c0, 0x1ed6: 0x00c0, 0x1ed7: 0x00c0, + 0x1ed8: 0x00c0, 0x1ed9: 0x00c0, 0x1eda: 0x00c0, 0x1edb: 0x00c0, 0x1edc: 0x00c0, 0x1edd: 0x00c0, + 0x1ede: 0x00c0, 0x1ee0: 0x00c0, 0x1ee1: 0x00c0, 0x1ee2: 0x00c0, 0x1ee3: 0x00c0, + 0x1ee4: 0x00c0, 0x1ee5: 0x00c0, 0x1ee6: 0x00c0, 0x1ee7: 0x00c0, 0x1ee8: 0x00c0, 0x1ee9: 0x00c0, + 0x1eea: 0x00c0, 0x1eeb: 0x00c0, 0x1eec: 0x00c0, 0x1eed: 0x00c0, 0x1eee: 0x00c0, 0x1eef: 0x00c0, + 0x1ef0: 0x00c0, 0x1ef1: 0x00c0, 0x1ef2: 0x00c0, 0x1ef3: 0x00c0, 0x1ef4: 0x00c0, 0x1ef5: 0x00c0, + 0x1ef6: 0x00c0, 0x1ef7: 0x00c0, 0x1ef8: 0x00c0, 0x1ef9: 0x00c0, 0x1efa: 0x00c0, 0x1efb: 0x00c0, + 0x1efc: 0x0080, 0x1efd: 0x0080, 0x1efe: 0x00c0, 0x1eff: 0x00c0, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0x00c0, 0x1f01: 0x00c0, 0x1f02: 0x00c0, 0x1f03: 0x00c0, 0x1f04: 0x00c0, 0x1f05: 0x00c0, + 0x1f06: 0x00c0, 0x1f07: 0x00c0, 0x1f08: 0x00c0, 0x1f09: 0x00c0, 0x1f0a: 0x00c0, 0x1f0b: 0x00c0, + 0x1f0c: 0x00c0, 0x1f0d: 0x00c0, 0x1f0e: 0x00c0, 0x1f0f: 0x00c0, 0x1f10: 0x00c0, 0x1f11: 0x00c0, + 0x1f12: 0x00c0, 0x1f13: 0x00c0, 0x1f14: 0x00c0, 0x1f15: 0x00c0, 0x1f16: 0x00c0, 0x1f17: 0x00c0, + 0x1f18: 0x00c0, 0x1f19: 0x00c0, 0x1f1a: 0x00c0, 0x1f1b: 0x00c0, 0x1f1c: 0x00c0, 0x1f1d: 0x00c0, + 0x1f1e: 0x00c0, 0x1f1f: 0x00c0, 0x1f20: 0x00c0, 0x1f21: 0x00c0, 0x1f22: 0x00c0, 0x1f23: 0x00c0, + 0x1f24: 0x00c0, 0x1f25: 0x0080, 0x1f26: 0x0080, 0x1f27: 0x0080, 0x1f28: 0x0080, 0x1f29: 0x0080, + 0x1f2a: 0x0080, 0x1f2b: 0x00c0, 0x1f2c: 0x00c0, 0x1f2d: 0x00c0, 0x1f2e: 0x00c0, 0x1f2f: 0x00c3, + 0x1f30: 0x00c3, 0x1f31: 0x00c3, 0x1f32: 0x00c0, 0x1f33: 0x00c0, + 0x1f39: 0x0080, 0x1f3a: 0x0080, 0x1f3b: 0x0080, + 0x1f3c: 0x0080, 0x1f3d: 0x0080, 0x1f3e: 0x0080, 0x1f3f: 0x0080, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0x00c0, 0x1f41: 0x00c0, 0x1f42: 0x00c0, 0x1f43: 0x00c0, 0x1f44: 0x00c0, 0x1f45: 0x00c0, + 0x1f46: 0x00c0, 0x1f47: 0x00c0, 0x1f48: 0x00c0, 0x1f49: 0x00c0, 0x1f4a: 0x00c0, 0x1f4b: 0x00c0, + 0x1f4c: 0x00c0, 0x1f4d: 0x00c0, 0x1f4e: 0x00c0, 0x1f4f: 0x00c0, 0x1f50: 0x00c0, 0x1f51: 0x00c0, + 0x1f52: 0x00c0, 0x1f53: 0x00c0, 0x1f54: 0x00c0, 0x1f55: 0x00c0, 0x1f56: 0x00c0, 0x1f57: 0x00c0, + 0x1f58: 0x00c0, 0x1f59: 0x00c0, 0x1f5a: 0x00c0, 0x1f5b: 0x00c0, 0x1f5c: 0x00c0, 0x1f5d: 0x00c0, + 0x1f5e: 0x00c0, 0x1f5f: 0x00c0, 0x1f60: 0x00c0, 0x1f61: 0x00c0, 0x1f62: 0x00c0, 0x1f63: 0x00c0, + 0x1f64: 0x00c0, 
0x1f65: 0x00c0, 0x1f67: 0x00c0, + 0x1f6d: 0x00c0, + 0x1f70: 0x00c0, 0x1f71: 0x00c0, 0x1f72: 0x00c0, 0x1f73: 0x00c0, 0x1f74: 0x00c0, 0x1f75: 0x00c0, + 0x1f76: 0x00c0, 0x1f77: 0x00c0, 0x1f78: 0x00c0, 0x1f79: 0x00c0, 0x1f7a: 0x00c0, 0x1f7b: 0x00c0, + 0x1f7c: 0x00c0, 0x1f7d: 0x00c0, 0x1f7e: 0x00c0, 0x1f7f: 0x00c0, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0x00c0, 0x1f81: 0x00c0, 0x1f82: 0x00c0, 0x1f83: 0x00c0, 0x1f84: 0x00c0, 0x1f85: 0x00c0, + 0x1f86: 0x00c0, 0x1f87: 0x00c0, 0x1f88: 0x00c0, 0x1f89: 0x00c0, 0x1f8a: 0x00c0, 0x1f8b: 0x00c0, + 0x1f8c: 0x00c0, 0x1f8d: 0x00c0, 0x1f8e: 0x00c0, 0x1f8f: 0x00c0, 0x1f90: 0x00c0, 0x1f91: 0x00c0, + 0x1f92: 0x00c0, 0x1f93: 0x00c0, 0x1f94: 0x00c0, 0x1f95: 0x00c0, 0x1f96: 0x00c0, 0x1f97: 0x00c0, + 0x1f98: 0x00c0, 0x1f99: 0x00c0, 0x1f9a: 0x00c0, 0x1f9b: 0x00c0, 0x1f9c: 0x00c0, 0x1f9d: 0x00c0, + 0x1f9e: 0x00c0, 0x1f9f: 0x00c0, 0x1fa0: 0x00c0, 0x1fa1: 0x00c0, 0x1fa2: 0x00c0, 0x1fa3: 0x00c0, + 0x1fa4: 0x00c0, 0x1fa5: 0x00c0, 0x1fa6: 0x00c0, 0x1fa7: 0x00c0, + 0x1faf: 0x0080, + 0x1fb0: 0x0080, + 0x1fbf: 0x00c6, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x00c0, 0x1fc1: 0x00c0, 0x1fc2: 0x00c0, 0x1fc3: 0x00c0, 0x1fc4: 0x00c0, 0x1fc5: 0x00c0, + 0x1fc6: 0x00c0, 0x1fc7: 0x00c0, 0x1fc8: 0x00c0, 0x1fc9: 0x00c0, 0x1fca: 0x00c0, 0x1fcb: 0x00c0, + 0x1fcc: 0x00c0, 0x1fcd: 0x00c0, 0x1fce: 0x00c0, 0x1fcf: 0x00c0, 0x1fd0: 0x00c0, 0x1fd1: 0x00c0, + 0x1fd2: 0x00c0, 0x1fd3: 0x00c0, 0x1fd4: 0x00c0, 0x1fd5: 0x00c0, 0x1fd6: 0x00c0, + 0x1fe0: 0x00c0, 0x1fe1: 0x00c0, 0x1fe2: 0x00c0, 0x1fe3: 0x00c0, + 0x1fe4: 0x00c0, 0x1fe5: 0x00c0, 0x1fe6: 0x00c0, 0x1fe8: 0x00c0, 0x1fe9: 0x00c0, + 0x1fea: 0x00c0, 0x1feb: 0x00c0, 0x1fec: 0x00c0, 0x1fed: 0x00c0, 0x1fee: 0x00c0, + 0x1ff0: 0x00c0, 0x1ff1: 0x00c0, 0x1ff2: 0x00c0, 0x1ff3: 0x00c0, 0x1ff4: 0x00c0, 0x1ff5: 0x00c0, + 0x1ff6: 0x00c0, 0x1ff8: 0x00c0, 0x1ff9: 0x00c0, 0x1ffa: 0x00c0, 0x1ffb: 0x00c0, + 0x1ffc: 0x00c0, 0x1ffd: 0x00c0, 0x1ffe: 0x00c0, + // Block 0x80, offset 0x2000 + 0x2000: 0x00c0, 0x2001: 0x00c0, 0x2002: 0x00c0, 0x2003: 0x00c0, 0x2004: 0x00c0, 0x2005: 0x00c0, + 0x2006: 0x00c0, 0x2008: 0x00c0, 0x2009: 0x00c0, 0x200a: 0x00c0, 0x200b: 0x00c0, + 0x200c: 0x00c0, 0x200d: 0x00c0, 0x200e: 0x00c0, 0x2010: 0x00c0, 0x2011: 0x00c0, + 0x2012: 0x00c0, 0x2013: 0x00c0, 0x2014: 0x00c0, 0x2015: 0x00c0, 0x2016: 0x00c0, + 0x2018: 0x00c0, 0x2019: 0x00c0, 0x201a: 0x00c0, 0x201b: 0x00c0, 0x201c: 0x00c0, 0x201d: 0x00c0, + 0x201e: 0x00c0, 0x2020: 0x00c3, 0x2021: 0x00c3, 0x2022: 0x00c3, 0x2023: 0x00c3, + 0x2024: 0x00c3, 0x2025: 0x00c3, 0x2026: 0x00c3, 0x2027: 0x00c3, 0x2028: 0x00c3, 0x2029: 0x00c3, + 0x202a: 0x00c3, 0x202b: 0x00c3, 0x202c: 0x00c3, 0x202d: 0x00c3, 0x202e: 0x00c3, 0x202f: 0x00c3, + 0x2030: 0x00c3, 0x2031: 0x00c3, 0x2032: 0x00c3, 0x2033: 0x00c3, 0x2034: 0x00c3, 0x2035: 0x00c3, + 0x2036: 0x00c3, 0x2037: 0x00c3, 0x2038: 0x00c3, 0x2039: 0x00c3, 0x203a: 0x00c3, 0x203b: 0x00c3, + 0x203c: 0x00c3, 0x203d: 0x00c3, 0x203e: 0x00c3, 0x203f: 0x00c3, + // Block 0x81, offset 0x2040 + 0x2040: 0x0080, 0x2041: 0x0080, 0x2042: 0x0080, 0x2043: 0x0080, 0x2044: 0x0080, 0x2045: 0x0080, + 0x2046: 0x0080, 0x2047: 0x0080, 0x2048: 0x0080, 0x2049: 0x0080, 0x204a: 0x0080, 0x204b: 0x0080, + 0x204c: 0x0080, 0x204d: 0x0080, 0x204e: 0x0080, 0x204f: 0x0080, 0x2050: 0x0080, 0x2051: 0x0080, + 0x2052: 0x0080, 0x2053: 0x0080, 0x2054: 0x0080, 0x2055: 0x0080, 0x2056: 0x0080, 0x2057: 0x0080, + 0x2058: 0x0080, 0x2059: 0x0080, 0x205a: 0x0080, 0x205b: 0x0080, 0x205c: 0x0080, 0x205d: 0x0080, + 0x205e: 0x0080, 0x205f: 0x0080, 0x2060: 0x0080, 0x2061: 0x0080, 0x2062: 0x0080, 0x2063: 0x0080, + 
0x2064: 0x0080, 0x2065: 0x0080, 0x2066: 0x0080, 0x2067: 0x0080, 0x2068: 0x0080, 0x2069: 0x0080, + 0x206a: 0x0080, 0x206b: 0x0080, 0x206c: 0x0080, 0x206d: 0x0080, 0x206e: 0x0080, 0x206f: 0x00c0, + 0x2070: 0x0080, 0x2071: 0x0080, 0x2072: 0x0080, 0x2073: 0x0080, 0x2074: 0x0080, 0x2075: 0x0080, + 0x2076: 0x0080, 0x2077: 0x0080, 0x2078: 0x0080, 0x2079: 0x0080, 0x207a: 0x0080, 0x207b: 0x0080, + 0x207c: 0x0080, 0x207d: 0x0080, 0x207e: 0x0080, 0x207f: 0x0080, + // Block 0x82, offset 0x2080 + 0x2080: 0x0080, 0x2081: 0x0080, 0x2082: 0x0080, 0x2083: 0x0080, 0x2084: 0x0080, + // Block 0x83, offset 0x20c0 + 0x20c0: 0x008c, 0x20c1: 0x008c, 0x20c2: 0x008c, 0x20c3: 0x008c, 0x20c4: 0x008c, 0x20c5: 0x008c, + 0x20c6: 0x008c, 0x20c7: 0x008c, 0x20c8: 0x008c, 0x20c9: 0x008c, 0x20ca: 0x008c, 0x20cb: 0x008c, + 0x20cc: 0x008c, 0x20cd: 0x008c, 0x20ce: 0x008c, 0x20cf: 0x008c, 0x20d0: 0x008c, 0x20d1: 0x008c, + 0x20d2: 0x008c, 0x20d3: 0x008c, 0x20d4: 0x008c, 0x20d5: 0x008c, 0x20d6: 0x008c, 0x20d7: 0x008c, + 0x20d8: 0x008c, 0x20d9: 0x008c, 0x20db: 0x008c, 0x20dc: 0x008c, 0x20dd: 0x008c, + 0x20de: 0x008c, 0x20df: 0x008c, 0x20e0: 0x008c, 0x20e1: 0x008c, 0x20e2: 0x008c, 0x20e3: 0x008c, + 0x20e4: 0x008c, 0x20e5: 0x008c, 0x20e6: 0x008c, 0x20e7: 0x008c, 0x20e8: 0x008c, 0x20e9: 0x008c, + 0x20ea: 0x008c, 0x20eb: 0x008c, 0x20ec: 0x008c, 0x20ed: 0x008c, 0x20ee: 0x008c, 0x20ef: 0x008c, + 0x20f0: 0x008c, 0x20f1: 0x008c, 0x20f2: 0x008c, 0x20f3: 0x008c, 0x20f4: 0x008c, 0x20f5: 0x008c, + 0x20f6: 0x008c, 0x20f7: 0x008c, 0x20f8: 0x008c, 0x20f9: 0x008c, 0x20fa: 0x008c, 0x20fb: 0x008c, + 0x20fc: 0x008c, 0x20fd: 0x008c, 0x20fe: 0x008c, 0x20ff: 0x008c, + // Block 0x84, offset 0x2100 + 0x2100: 0x008c, 0x2101: 0x008c, 0x2102: 0x008c, 0x2103: 0x008c, 0x2104: 0x008c, 0x2105: 0x008c, + 0x2106: 0x008c, 0x2107: 0x008c, 0x2108: 0x008c, 0x2109: 0x008c, 0x210a: 0x008c, 0x210b: 0x008c, + 0x210c: 0x008c, 0x210d: 0x008c, 0x210e: 0x008c, 0x210f: 0x008c, 0x2110: 0x008c, 0x2111: 0x008c, + 0x2112: 0x008c, 0x2113: 0x008c, 0x2114: 0x008c, 0x2115: 0x008c, 0x2116: 0x008c, 0x2117: 0x008c, + 0x2118: 0x008c, 0x2119: 0x008c, 0x211a: 0x008c, 0x211b: 0x008c, 0x211c: 0x008c, 0x211d: 0x008c, + 0x211e: 0x008c, 0x211f: 0x008c, 0x2120: 0x008c, 0x2121: 0x008c, 0x2122: 0x008c, 0x2123: 0x008c, + 0x2124: 0x008c, 0x2125: 0x008c, 0x2126: 0x008c, 0x2127: 0x008c, 0x2128: 0x008c, 0x2129: 0x008c, + 0x212a: 0x008c, 0x212b: 0x008c, 0x212c: 0x008c, 0x212d: 0x008c, 0x212e: 0x008c, 0x212f: 0x008c, + 0x2130: 0x008c, 0x2131: 0x008c, 0x2132: 0x008c, 0x2133: 0x008c, + // Block 0x85, offset 0x2140 + 0x2140: 0x008c, 0x2141: 0x008c, 0x2142: 0x008c, 0x2143: 0x008c, 0x2144: 0x008c, 0x2145: 0x008c, + 0x2146: 0x008c, 0x2147: 0x008c, 0x2148: 0x008c, 0x2149: 0x008c, 0x214a: 0x008c, 0x214b: 0x008c, + 0x214c: 0x008c, 0x214d: 0x008c, 0x214e: 0x008c, 0x214f: 0x008c, 0x2150: 0x008c, 0x2151: 0x008c, + 0x2152: 0x008c, 0x2153: 0x008c, 0x2154: 0x008c, 0x2155: 0x008c, 0x2156: 0x008c, 0x2157: 0x008c, + 0x2158: 0x008c, 0x2159: 0x008c, 0x215a: 0x008c, 0x215b: 0x008c, 0x215c: 0x008c, 0x215d: 0x008c, + 0x215e: 0x008c, 0x215f: 0x008c, 0x2160: 0x008c, 0x2161: 0x008c, 0x2162: 0x008c, 0x2163: 0x008c, + 0x2164: 0x008c, 0x2165: 0x008c, 0x2166: 0x008c, 0x2167: 0x008c, 0x2168: 0x008c, 0x2169: 0x008c, + 0x216a: 0x008c, 0x216b: 0x008c, 0x216c: 0x008c, 0x216d: 0x008c, 0x216e: 0x008c, 0x216f: 0x008c, + 0x2170: 0x008c, 0x2171: 0x008c, 0x2172: 0x008c, 0x2173: 0x008c, 0x2174: 0x008c, 0x2175: 0x008c, + 0x2176: 0x008c, 0x2177: 0x008c, 0x2178: 0x008c, 0x2179: 0x008c, 0x217a: 0x008c, 0x217b: 0x008c, + 0x217c: 0x008c, 0x217d: 
0x008c, 0x217e: 0x008c, 0x217f: 0x008c, + // Block 0x86, offset 0x2180 + 0x2180: 0x008c, 0x2181: 0x008c, 0x2182: 0x008c, 0x2183: 0x008c, 0x2184: 0x008c, 0x2185: 0x008c, + 0x2186: 0x008c, 0x2187: 0x008c, 0x2188: 0x008c, 0x2189: 0x008c, 0x218a: 0x008c, 0x218b: 0x008c, + 0x218c: 0x008c, 0x218d: 0x008c, 0x218e: 0x008c, 0x218f: 0x008c, 0x2190: 0x008c, 0x2191: 0x008c, + 0x2192: 0x008c, 0x2193: 0x008c, 0x2194: 0x008c, 0x2195: 0x008c, + 0x21b0: 0x0080, 0x21b1: 0x0080, 0x21b2: 0x0080, 0x21b3: 0x0080, 0x21b4: 0x0080, 0x21b5: 0x0080, + 0x21b6: 0x0080, 0x21b7: 0x0080, 0x21b8: 0x0080, 0x21b9: 0x0080, 0x21ba: 0x0080, 0x21bb: 0x0080, + // Block 0x87, offset 0x21c0 + 0x21c0: 0x0080, 0x21c1: 0x0080, 0x21c2: 0x0080, 0x21c3: 0x0080, 0x21c4: 0x0080, 0x21c5: 0x00cc, + 0x21c6: 0x00c0, 0x21c7: 0x00cc, 0x21c8: 0x0080, 0x21c9: 0x0080, 0x21ca: 0x0080, 0x21cb: 0x0080, + 0x21cc: 0x0080, 0x21cd: 0x0080, 0x21ce: 0x0080, 0x21cf: 0x0080, 0x21d0: 0x0080, 0x21d1: 0x0080, + 0x21d2: 0x0080, 0x21d3: 0x0080, 0x21d4: 0x0080, 0x21d5: 0x0080, 0x21d6: 0x0080, 0x21d7: 0x0080, + 0x21d8: 0x0080, 0x21d9: 0x0080, 0x21da: 0x0080, 0x21db: 0x0080, 0x21dc: 0x0080, 0x21dd: 0x0080, + 0x21de: 0x0080, 0x21df: 0x0080, 0x21e0: 0x0080, 0x21e1: 0x008c, 0x21e2: 0x008c, 0x21e3: 0x008c, + 0x21e4: 0x008c, 0x21e5: 0x008c, 0x21e6: 0x008c, 0x21e7: 0x008c, 0x21e8: 0x008c, 0x21e9: 0x008c, + 0x21ea: 0x00c3, 0x21eb: 0x00c3, 0x21ec: 0x00c3, 0x21ed: 0x00c3, 0x21ee: 0x0040, 0x21ef: 0x0040, + 0x21f0: 0x0080, 0x21f1: 0x0040, 0x21f2: 0x0040, 0x21f3: 0x0040, 0x21f4: 0x0040, 0x21f5: 0x0040, + 0x21f6: 0x0080, 0x21f7: 0x0080, 0x21f8: 0x008c, 0x21f9: 0x008c, 0x21fa: 0x008c, 0x21fb: 0x0040, + 0x21fc: 0x00c0, 0x21fd: 0x0080, 0x21fe: 0x0080, 0x21ff: 0x0080, + // Block 0x88, offset 0x2200 + 0x2201: 0x00cc, 0x2202: 0x00cc, 0x2203: 0x00cc, 0x2204: 0x00cc, 0x2205: 0x00cc, + 0x2206: 0x00cc, 0x2207: 0x00cc, 0x2208: 0x00cc, 0x2209: 0x00cc, 0x220a: 0x00cc, 0x220b: 0x00cc, + 0x220c: 0x00cc, 0x220d: 0x00cc, 0x220e: 0x00cc, 0x220f: 0x00cc, 0x2210: 0x00cc, 0x2211: 0x00cc, + 0x2212: 0x00cc, 0x2213: 0x00cc, 0x2214: 0x00cc, 0x2215: 0x00cc, 0x2216: 0x00cc, 0x2217: 0x00cc, + 0x2218: 0x00cc, 0x2219: 0x00cc, 0x221a: 0x00cc, 0x221b: 0x00cc, 0x221c: 0x00cc, 0x221d: 0x00cc, + 0x221e: 0x00cc, 0x221f: 0x00cc, 0x2220: 0x00cc, 0x2221: 0x00cc, 0x2222: 0x00cc, 0x2223: 0x00cc, + 0x2224: 0x00cc, 0x2225: 0x00cc, 0x2226: 0x00cc, 0x2227: 0x00cc, 0x2228: 0x00cc, 0x2229: 0x00cc, + 0x222a: 0x00cc, 0x222b: 0x00cc, 0x222c: 0x00cc, 0x222d: 0x00cc, 0x222e: 0x00cc, 0x222f: 0x00cc, + 0x2230: 0x00cc, 0x2231: 0x00cc, 0x2232: 0x00cc, 0x2233: 0x00cc, 0x2234: 0x00cc, 0x2235: 0x00cc, + 0x2236: 0x00cc, 0x2237: 0x00cc, 0x2238: 0x00cc, 0x2239: 0x00cc, 0x223a: 0x00cc, 0x223b: 0x00cc, + 0x223c: 0x00cc, 0x223d: 0x00cc, 0x223e: 0x00cc, 0x223f: 0x00cc, + // Block 0x89, offset 0x2240 + 0x2240: 0x00cc, 0x2241: 0x00cc, 0x2242: 0x00cc, 0x2243: 0x00cc, 0x2244: 0x00cc, 0x2245: 0x00cc, + 0x2246: 0x00cc, 0x2247: 0x00cc, 0x2248: 0x00cc, 0x2249: 0x00cc, 0x224a: 0x00cc, 0x224b: 0x00cc, + 0x224c: 0x00cc, 0x224d: 0x00cc, 0x224e: 0x00cc, 0x224f: 0x00cc, 0x2250: 0x00cc, 0x2251: 0x00cc, + 0x2252: 0x00cc, 0x2253: 0x00cc, 0x2254: 0x00cc, 0x2255: 0x00cc, 0x2256: 0x00cc, + 0x2259: 0x00c3, 0x225a: 0x00c3, 0x225b: 0x0080, 0x225c: 0x0080, 0x225d: 0x00cc, + 0x225e: 0x00cc, 0x225f: 0x008c, 0x2260: 0x0080, 0x2261: 0x00cc, 0x2262: 0x00cc, 0x2263: 0x00cc, + 0x2264: 0x00cc, 0x2265: 0x00cc, 0x2266: 0x00cc, 0x2267: 0x00cc, 0x2268: 0x00cc, 0x2269: 0x00cc, + 0x226a: 0x00cc, 0x226b: 0x00cc, 0x226c: 0x00cc, 0x226d: 0x00cc, 0x226e: 0x00cc, 0x226f: 0x00cc, + 
0x2270: 0x00cc, 0x2271: 0x00cc, 0x2272: 0x00cc, 0x2273: 0x00cc, 0x2274: 0x00cc, 0x2275: 0x00cc, + 0x2276: 0x00cc, 0x2277: 0x00cc, 0x2278: 0x00cc, 0x2279: 0x00cc, 0x227a: 0x00cc, 0x227b: 0x00cc, + 0x227c: 0x00cc, 0x227d: 0x00cc, 0x227e: 0x00cc, 0x227f: 0x00cc, + // Block 0x8a, offset 0x2280 + 0x2280: 0x00cc, 0x2281: 0x00cc, 0x2282: 0x00cc, 0x2283: 0x00cc, 0x2284: 0x00cc, 0x2285: 0x00cc, + 0x2286: 0x00cc, 0x2287: 0x00cc, 0x2288: 0x00cc, 0x2289: 0x00cc, 0x228a: 0x00cc, 0x228b: 0x00cc, + 0x228c: 0x00cc, 0x228d: 0x00cc, 0x228e: 0x00cc, 0x228f: 0x00cc, 0x2290: 0x00cc, 0x2291: 0x00cc, + 0x2292: 0x00cc, 0x2293: 0x00cc, 0x2294: 0x00cc, 0x2295: 0x00cc, 0x2296: 0x00cc, 0x2297: 0x00cc, + 0x2298: 0x00cc, 0x2299: 0x00cc, 0x229a: 0x00cc, 0x229b: 0x00cc, 0x229c: 0x00cc, 0x229d: 0x00cc, + 0x229e: 0x00cc, 0x229f: 0x00cc, 0x22a0: 0x00cc, 0x22a1: 0x00cc, 0x22a2: 0x00cc, 0x22a3: 0x00cc, + 0x22a4: 0x00cc, 0x22a5: 0x00cc, 0x22a6: 0x00cc, 0x22a7: 0x00cc, 0x22a8: 0x00cc, 0x22a9: 0x00cc, + 0x22aa: 0x00cc, 0x22ab: 0x00cc, 0x22ac: 0x00cc, 0x22ad: 0x00cc, 0x22ae: 0x00cc, 0x22af: 0x00cc, + 0x22b0: 0x00cc, 0x22b1: 0x00cc, 0x22b2: 0x00cc, 0x22b3: 0x00cc, 0x22b4: 0x00cc, 0x22b5: 0x00cc, + 0x22b6: 0x00cc, 0x22b7: 0x00cc, 0x22b8: 0x00cc, 0x22b9: 0x00cc, 0x22ba: 0x00cc, 0x22bb: 0x00d2, + 0x22bc: 0x00c0, 0x22bd: 0x00cc, 0x22be: 0x00cc, 0x22bf: 0x008c, + // Block 0x8b, offset 0x22c0 + 0x22c5: 0x00c0, + 0x22c6: 0x00c0, 0x22c7: 0x00c0, 0x22c8: 0x00c0, 0x22c9: 0x00c0, 0x22ca: 0x00c0, 0x22cb: 0x00c0, + 0x22cc: 0x00c0, 0x22cd: 0x00c0, 0x22ce: 0x00c0, 0x22cf: 0x00c0, 0x22d0: 0x00c0, 0x22d1: 0x00c0, + 0x22d2: 0x00c0, 0x22d3: 0x00c0, 0x22d4: 0x00c0, 0x22d5: 0x00c0, 0x22d6: 0x00c0, 0x22d7: 0x00c0, + 0x22d8: 0x00c0, 0x22d9: 0x00c0, 0x22da: 0x00c0, 0x22db: 0x00c0, 0x22dc: 0x00c0, 0x22dd: 0x00c0, + 0x22de: 0x00c0, 0x22df: 0x00c0, 0x22e0: 0x00c0, 0x22e1: 0x00c0, 0x22e2: 0x00c0, 0x22e3: 0x00c0, + 0x22e4: 0x00c0, 0x22e5: 0x00c0, 0x22e6: 0x00c0, 0x22e7: 0x00c0, 0x22e8: 0x00c0, 0x22e9: 0x00c0, + 0x22ea: 0x00c0, 0x22eb: 0x00c0, 0x22ec: 0x00c0, 0x22ed: 0x00c0, + 0x22f1: 0x0080, 0x22f2: 0x0080, 0x22f3: 0x0080, 0x22f4: 0x0080, 0x22f5: 0x0080, + 0x22f6: 0x0080, 0x22f7: 0x0080, 0x22f8: 0x0080, 0x22f9: 0x0080, 0x22fa: 0x0080, 0x22fb: 0x0080, + 0x22fc: 0x0080, 0x22fd: 0x0080, 0x22fe: 0x0080, 0x22ff: 0x0080, + // Block 0x8c, offset 0x2300 + 0x2300: 0x0080, 0x2301: 0x0080, 0x2302: 0x0080, 0x2303: 0x0080, 0x2304: 0x0080, 0x2305: 0x0080, + 0x2306: 0x0080, 0x2307: 0x0080, 0x2308: 0x0080, 0x2309: 0x0080, 0x230a: 0x0080, 0x230b: 0x0080, + 0x230c: 0x0080, 0x230d: 0x0080, 0x230e: 0x0080, 0x230f: 0x0080, 0x2310: 0x0080, 0x2311: 0x0080, + 0x2312: 0x0080, 0x2313: 0x0080, 0x2314: 0x0080, 0x2315: 0x0080, 0x2316: 0x0080, 0x2317: 0x0080, + 0x2318: 0x0080, 0x2319: 0x0080, 0x231a: 0x0080, 0x231b: 0x0080, 0x231c: 0x0080, 0x231d: 0x0080, + 0x231e: 0x0080, 0x231f: 0x0080, 0x2320: 0x0080, 0x2321: 0x0080, 0x2322: 0x0080, 0x2323: 0x0080, + 0x2324: 0x0040, 0x2325: 0x0080, 0x2326: 0x0080, 0x2327: 0x0080, 0x2328: 0x0080, 0x2329: 0x0080, + 0x232a: 0x0080, 0x232b: 0x0080, 0x232c: 0x0080, 0x232d: 0x0080, 0x232e: 0x0080, 0x232f: 0x0080, + 0x2330: 0x0080, 0x2331: 0x0080, 0x2332: 0x0080, 0x2333: 0x0080, 0x2334: 0x0080, 0x2335: 0x0080, + 0x2336: 0x0080, 0x2337: 0x0080, 0x2338: 0x0080, 0x2339: 0x0080, 0x233a: 0x0080, 0x233b: 0x0080, + 0x233c: 0x0080, 0x233d: 0x0080, 0x233e: 0x0080, 0x233f: 0x0080, + // Block 0x8d, offset 0x2340 + 0x2340: 0x0080, 0x2341: 0x0080, 0x2342: 0x0080, 0x2343: 0x0080, 0x2344: 0x0080, 0x2345: 0x0080, + 0x2346: 0x0080, 0x2347: 0x0080, 0x2348: 0x0080, 0x2349: 
0x0080, 0x234a: 0x0080, 0x234b: 0x0080, + 0x234c: 0x0080, 0x234d: 0x0080, 0x234e: 0x0080, 0x2350: 0x0080, 0x2351: 0x0080, + 0x2352: 0x0080, 0x2353: 0x0080, 0x2354: 0x0080, 0x2355: 0x0080, 0x2356: 0x0080, 0x2357: 0x0080, + 0x2358: 0x0080, 0x2359: 0x0080, 0x235a: 0x0080, 0x235b: 0x0080, 0x235c: 0x0080, 0x235d: 0x0080, + 0x235e: 0x0080, 0x235f: 0x0080, 0x2360: 0x00c0, 0x2361: 0x00c0, 0x2362: 0x00c0, 0x2363: 0x00c0, + 0x2364: 0x00c0, 0x2365: 0x00c0, 0x2366: 0x00c0, 0x2367: 0x00c0, 0x2368: 0x00c0, 0x2369: 0x00c0, + 0x236a: 0x00c0, 0x236b: 0x00c0, 0x236c: 0x00c0, 0x236d: 0x00c0, 0x236e: 0x00c0, 0x236f: 0x00c0, + 0x2370: 0x00c0, 0x2371: 0x00c0, 0x2372: 0x00c0, 0x2373: 0x00c0, 0x2374: 0x00c0, 0x2375: 0x00c0, + 0x2376: 0x00c0, 0x2377: 0x00c0, 0x2378: 0x00c0, 0x2379: 0x00c0, 0x237a: 0x00c0, + // Block 0x8e, offset 0x2380 + 0x2380: 0x0080, 0x2381: 0x0080, 0x2382: 0x0080, 0x2383: 0x0080, 0x2384: 0x0080, 0x2385: 0x0080, + 0x2386: 0x0080, 0x2387: 0x0080, 0x2388: 0x0080, 0x2389: 0x0080, 0x238a: 0x0080, 0x238b: 0x0080, + 0x238c: 0x0080, 0x238d: 0x0080, 0x238e: 0x0080, 0x238f: 0x0080, 0x2390: 0x0080, 0x2391: 0x0080, + 0x2392: 0x0080, 0x2393: 0x0080, 0x2394: 0x0080, 0x2395: 0x0080, 0x2396: 0x0080, 0x2397: 0x0080, + 0x2398: 0x0080, 0x2399: 0x0080, 0x239a: 0x0080, 0x239b: 0x0080, 0x239c: 0x0080, 0x239d: 0x0080, + 0x239e: 0x0080, 0x239f: 0x0080, 0x23a0: 0x0080, 0x23a1: 0x0080, 0x23a2: 0x0080, 0x23a3: 0x0080, + 0x23b0: 0x00cc, 0x23b1: 0x00cc, 0x23b2: 0x00cc, 0x23b3: 0x00cc, 0x23b4: 0x00cc, 0x23b5: 0x00cc, + 0x23b6: 0x00cc, 0x23b7: 0x00cc, 0x23b8: 0x00cc, 0x23b9: 0x00cc, 0x23ba: 0x00cc, 0x23bb: 0x00cc, + 0x23bc: 0x00cc, 0x23bd: 0x00cc, 0x23be: 0x00cc, 0x23bf: 0x00cc, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x0080, 0x23c1: 0x0080, 0x23c2: 0x0080, 0x23c3: 0x0080, 0x23c4: 0x0080, 0x23c5: 0x0080, + 0x23c6: 0x0080, 0x23c7: 0x0080, 0x23c8: 0x0080, 0x23c9: 0x0080, 0x23ca: 0x0080, 0x23cb: 0x0080, + 0x23cc: 0x0080, 0x23cd: 0x0080, 0x23ce: 0x0080, 0x23cf: 0x0080, 0x23d0: 0x0080, 0x23d1: 0x0080, + 0x23d2: 0x0080, 0x23d3: 0x0080, 0x23d4: 0x0080, 0x23d5: 0x0080, 0x23d6: 0x0080, 0x23d7: 0x0080, + 0x23d8: 0x0080, 0x23d9: 0x0080, 0x23da: 0x0080, 0x23db: 0x0080, 0x23dc: 0x0080, 0x23dd: 0x0080, + 0x23de: 0x0080, 0x23e0: 0x0080, 0x23e1: 0x0080, 0x23e2: 0x0080, 0x23e3: 0x0080, + 0x23e4: 0x0080, 0x23e5: 0x0080, 0x23e6: 0x0080, 0x23e7: 0x0080, 0x23e8: 0x0080, 0x23e9: 0x0080, + 0x23ea: 0x0080, 0x23eb: 0x0080, 0x23ec: 0x0080, 0x23ed: 0x0080, 0x23ee: 0x0080, 0x23ef: 0x0080, + 0x23f0: 0x0080, 0x23f1: 0x0080, 0x23f2: 0x0080, 0x23f3: 0x0080, 0x23f4: 0x0080, 0x23f5: 0x0080, + 0x23f6: 0x0080, 0x23f7: 0x0080, 0x23f8: 0x0080, 0x23f9: 0x0080, 0x23fa: 0x0080, 0x23fb: 0x0080, + 0x23fc: 0x0080, 0x23fd: 0x0080, 0x23fe: 0x0080, 0x23ff: 0x0080, + // Block 0x90, offset 0x2400 + 0x2400: 0x0080, 0x2401: 0x0080, 0x2402: 0x0080, 0x2403: 0x0080, 0x2404: 0x0080, 0x2405: 0x0080, + 0x2406: 0x0080, 0x2407: 0x0080, 0x2408: 0x0080, 0x2409: 0x0080, 0x240a: 0x0080, 0x240b: 0x0080, + 0x240c: 0x0080, 0x240d: 0x0080, 0x240e: 0x0080, 0x240f: 0x0080, 0x2410: 0x008c, 0x2411: 0x008c, + 0x2412: 0x008c, 0x2413: 0x008c, 0x2414: 0x008c, 0x2415: 0x008c, 0x2416: 0x008c, 0x2417: 0x008c, + 0x2418: 0x008c, 0x2419: 0x008c, 0x241a: 0x008c, 0x241b: 0x008c, 0x241c: 0x008c, 0x241d: 0x008c, + 0x241e: 0x008c, 0x241f: 0x008c, 0x2420: 0x008c, 0x2421: 0x008c, 0x2422: 0x008c, 0x2423: 0x008c, + 0x2424: 0x008c, 0x2425: 0x008c, 0x2426: 0x008c, 0x2427: 0x008c, 0x2428: 0x008c, 0x2429: 0x008c, + 0x242a: 0x008c, 0x242b: 0x008c, 0x242c: 0x008c, 0x242d: 0x008c, 0x242e: 0x008c, 0x242f: 0x008c, + 
0x2430: 0x008c, 0x2431: 0x008c, 0x2432: 0x008c, 0x2433: 0x008c, 0x2434: 0x008c, 0x2435: 0x008c, + 0x2436: 0x008c, 0x2437: 0x008c, 0x2438: 0x008c, 0x2439: 0x008c, 0x243a: 0x008c, 0x243b: 0x008c, + 0x243c: 0x008c, 0x243d: 0x008c, 0x243e: 0x008c, + // Block 0x91, offset 0x2440 + 0x2440: 0x008c, 0x2441: 0x008c, 0x2442: 0x008c, 0x2443: 0x008c, 0x2444: 0x008c, 0x2445: 0x008c, + 0x2446: 0x008c, 0x2447: 0x008c, 0x2448: 0x008c, 0x2449: 0x008c, 0x244a: 0x008c, 0x244b: 0x008c, + 0x244c: 0x008c, 0x244d: 0x008c, 0x244e: 0x008c, 0x244f: 0x008c, 0x2450: 0x008c, 0x2451: 0x008c, + 0x2452: 0x008c, 0x2453: 0x008c, 0x2454: 0x008c, 0x2455: 0x008c, 0x2456: 0x008c, 0x2457: 0x008c, + 0x2458: 0x0080, 0x2459: 0x0080, 0x245a: 0x0080, 0x245b: 0x0080, 0x245c: 0x0080, 0x245d: 0x0080, + 0x245e: 0x0080, 0x245f: 0x0080, 0x2460: 0x0080, 0x2461: 0x0080, 0x2462: 0x0080, 0x2463: 0x0080, + 0x2464: 0x0080, 0x2465: 0x0080, 0x2466: 0x0080, 0x2467: 0x0080, 0x2468: 0x0080, 0x2469: 0x0080, + 0x246a: 0x0080, 0x246b: 0x0080, 0x246c: 0x0080, 0x246d: 0x0080, 0x246e: 0x0080, 0x246f: 0x0080, + 0x2470: 0x0080, 0x2471: 0x0080, 0x2472: 0x0080, 0x2473: 0x0080, 0x2474: 0x0080, 0x2475: 0x0080, + 0x2476: 0x0080, 0x2477: 0x0080, 0x2478: 0x0080, 0x2479: 0x0080, 0x247a: 0x0080, 0x247b: 0x0080, + 0x247c: 0x0080, 0x247d: 0x0080, 0x247e: 0x0080, 0x247f: 0x0080, + // Block 0x92, offset 0x2480 + 0x2480: 0x00cc, 0x2481: 0x00cc, 0x2482: 0x00cc, 0x2483: 0x00cc, 0x2484: 0x00cc, 0x2485: 0x00cc, + 0x2486: 0x00cc, 0x2487: 0x00cc, 0x2488: 0x00cc, 0x2489: 0x00cc, 0x248a: 0x00cc, 0x248b: 0x00cc, + 0x248c: 0x00cc, 0x248d: 0x00cc, 0x248e: 0x00cc, 0x248f: 0x00cc, 0x2490: 0x00cc, 0x2491: 0x00cc, + 0x2492: 0x00cc, 0x2493: 0x00cc, 0x2494: 0x00cc, 0x2495: 0x00cc, 0x2496: 0x00cc, 0x2497: 0x00cc, + 0x2498: 0x00cc, 0x2499: 0x00cc, 0x249a: 0x00cc, 0x249b: 0x00cc, 0x249c: 0x00cc, 0x249d: 0x00cc, + 0x249e: 0x00cc, 0x249f: 0x00cc, 0x24a0: 0x00cc, 0x24a1: 0x00cc, 0x24a2: 0x00cc, 0x24a3: 0x00cc, + 0x24a4: 0x00cc, 0x24a5: 0x00cc, 0x24a6: 0x00cc, 0x24a7: 0x00cc, 0x24a8: 0x00cc, 0x24a9: 0x00cc, + 0x24aa: 0x00cc, 0x24ab: 0x00cc, 0x24ac: 0x00cc, 0x24ad: 0x00cc, 0x24ae: 0x00cc, 0x24af: 0x00cc, + 0x24b0: 0x00cc, 0x24b1: 0x00cc, 0x24b2: 0x00cc, 0x24b3: 0x00cc, 0x24b4: 0x00cc, 0x24b5: 0x00cc, + 0x24b6: 0x00cc, 0x24b7: 0x00cc, 0x24b8: 0x00cc, 0x24b9: 0x00cc, 0x24ba: 0x00cc, 0x24bb: 0x00cc, + 0x24bc: 0x00cc, 0x24bd: 0x00cc, 0x24be: 0x00cc, 0x24bf: 0x00cc, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x00cc, 0x24c1: 0x00cc, 0x24c2: 0x00cc, 0x24c3: 0x00cc, 0x24c4: 0x00cc, 0x24c5: 0x00cc, + 0x24c6: 0x00cc, 0x24c7: 0x00cc, 0x24c8: 0x00cc, 0x24c9: 0x00cc, 0x24ca: 0x00cc, 0x24cb: 0x00cc, + 0x24cc: 0x00cc, 0x24cd: 0x00cc, 0x24ce: 0x00cc, 0x24cf: 0x00cc, 0x24d0: 0x00cc, 0x24d1: 0x00cc, + 0x24d2: 0x00cc, 0x24d3: 0x00cc, 0x24d4: 0x00cc, 0x24d5: 0x00cc, 0x24d6: 0x00cc, 0x24d7: 0x00cc, + 0x24d8: 0x00cc, 0x24d9: 0x00cc, 0x24da: 0x00cc, 0x24db: 0x00cc, 0x24dc: 0x00cc, 0x24dd: 0x00cc, + 0x24de: 0x00cc, 0x24df: 0x00cc, 0x24e0: 0x00cc, 0x24e1: 0x00cc, 0x24e2: 0x00cc, 0x24e3: 0x00cc, + 0x24e4: 0x00cc, 0x24e5: 0x00cc, 0x24e6: 0x00cc, 0x24e7: 0x00cc, 0x24e8: 0x00cc, 0x24e9: 0x00cc, + 0x24ea: 0x00cc, 0x24eb: 0x00cc, 0x24ec: 0x00cc, 0x24ed: 0x00cc, 0x24ee: 0x00cc, 0x24ef: 0x00cc, + 0x24f0: 0x00cc, 0x24f1: 0x00cc, 0x24f2: 0x00cc, 0x24f3: 0x00cc, 0x24f4: 0x00cc, 0x24f5: 0x00cc, + // Block 0x94, offset 0x2500 + 0x2500: 0x00cc, 0x2501: 0x00cc, 0x2502: 0x00cc, 0x2503: 0x00cc, 0x2504: 0x00cc, 0x2505: 0x00cc, + 0x2506: 0x00cc, 0x2507: 0x00cc, 0x2508: 0x00cc, 0x2509: 0x00cc, 0x250a: 0x00cc, 0x250b: 0x00cc, + 0x250c: 
0x00cc, 0x250d: 0x00cc, 0x250e: 0x00cc, 0x250f: 0x00cc, 0x2510: 0x00cc, 0x2511: 0x00cc, + 0x2512: 0x00cc, 0x2513: 0x00cc, 0x2514: 0x00cc, 0x2515: 0x00cc, + // Block 0x95, offset 0x2540 + 0x2540: 0x00c0, 0x2541: 0x00c0, 0x2542: 0x00c0, 0x2543: 0x00c0, 0x2544: 0x00c0, 0x2545: 0x00c0, + 0x2546: 0x00c0, 0x2547: 0x00c0, 0x2548: 0x00c0, 0x2549: 0x00c0, 0x254a: 0x00c0, 0x254b: 0x00c0, + 0x254c: 0x00c0, 0x2550: 0x0080, 0x2551: 0x0080, + 0x2552: 0x0080, 0x2553: 0x0080, 0x2554: 0x0080, 0x2555: 0x0080, 0x2556: 0x0080, 0x2557: 0x0080, + 0x2558: 0x0080, 0x2559: 0x0080, 0x255a: 0x0080, 0x255b: 0x0080, 0x255c: 0x0080, 0x255d: 0x0080, + 0x255e: 0x0080, 0x255f: 0x0080, 0x2560: 0x0080, 0x2561: 0x0080, 0x2562: 0x0080, 0x2563: 0x0080, + 0x2564: 0x0080, 0x2565: 0x0080, 0x2566: 0x0080, 0x2567: 0x0080, 0x2568: 0x0080, 0x2569: 0x0080, + 0x256a: 0x0080, 0x256b: 0x0080, 0x256c: 0x0080, 0x256d: 0x0080, 0x256e: 0x0080, 0x256f: 0x0080, + 0x2570: 0x0080, 0x2571: 0x0080, 0x2572: 0x0080, 0x2573: 0x0080, 0x2574: 0x0080, 0x2575: 0x0080, + 0x2576: 0x0080, 0x2577: 0x0080, 0x2578: 0x0080, 0x2579: 0x0080, 0x257a: 0x0080, 0x257b: 0x0080, + 0x257c: 0x0080, 0x257d: 0x0080, 0x257e: 0x0080, 0x257f: 0x0080, + // Block 0x96, offset 0x2580 + 0x2580: 0x0080, 0x2581: 0x0080, 0x2582: 0x0080, 0x2583: 0x0080, 0x2584: 0x0080, 0x2585: 0x0080, + 0x2586: 0x0080, + 0x2590: 0x00c0, 0x2591: 0x00c0, + 0x2592: 0x00c0, 0x2593: 0x00c0, 0x2594: 0x00c0, 0x2595: 0x00c0, 0x2596: 0x00c0, 0x2597: 0x00c0, + 0x2598: 0x00c0, 0x2599: 0x00c0, 0x259a: 0x00c0, 0x259b: 0x00c0, 0x259c: 0x00c0, 0x259d: 0x00c0, + 0x259e: 0x00c0, 0x259f: 0x00c0, 0x25a0: 0x00c0, 0x25a1: 0x00c0, 0x25a2: 0x00c0, 0x25a3: 0x00c0, + 0x25a4: 0x00c0, 0x25a5: 0x00c0, 0x25a6: 0x00c0, 0x25a7: 0x00c0, 0x25a8: 0x00c0, 0x25a9: 0x00c0, + 0x25aa: 0x00c0, 0x25ab: 0x00c0, 0x25ac: 0x00c0, 0x25ad: 0x00c0, 0x25ae: 0x00c0, 0x25af: 0x00c0, + 0x25b0: 0x00c0, 0x25b1: 0x00c0, 0x25b2: 0x00c0, 0x25b3: 0x00c0, 0x25b4: 0x00c0, 0x25b5: 0x00c0, + 0x25b6: 0x00c0, 0x25b7: 0x00c0, 0x25b8: 0x00c0, 0x25b9: 0x00c0, 0x25ba: 0x00c0, 0x25bb: 0x00c0, + 0x25bc: 0x00c0, 0x25bd: 0x00c0, 0x25be: 0x0080, 0x25bf: 0x0080, + // Block 0x97, offset 0x25c0 + 0x25c0: 0x00c0, 0x25c1: 0x00c0, 0x25c2: 0x00c0, 0x25c3: 0x00c0, 0x25c4: 0x00c0, 0x25c5: 0x00c0, + 0x25c6: 0x00c0, 0x25c7: 0x00c0, 0x25c8: 0x00c0, 0x25c9: 0x00c0, 0x25ca: 0x00c0, 0x25cb: 0x00c0, + 0x25cc: 0x00c0, 0x25cd: 0x0080, 0x25ce: 0x0080, 0x25cf: 0x0080, 0x25d0: 0x00c0, 0x25d1: 0x00c0, + 0x25d2: 0x00c0, 0x25d3: 0x00c0, 0x25d4: 0x00c0, 0x25d5: 0x00c0, 0x25d6: 0x00c0, 0x25d7: 0x00c0, + 0x25d8: 0x00c0, 0x25d9: 0x00c0, 0x25da: 0x00c0, 0x25db: 0x00c0, 0x25dc: 0x00c0, 0x25dd: 0x00c0, + 0x25de: 0x00c0, 0x25df: 0x00c0, 0x25e0: 0x00c0, 0x25e1: 0x00c0, 0x25e2: 0x00c0, 0x25e3: 0x00c0, + 0x25e4: 0x00c0, 0x25e5: 0x00c0, 0x25e6: 0x00c0, 0x25e7: 0x00c0, 0x25e8: 0x00c0, 0x25e9: 0x00c0, + 0x25ea: 0x00c0, 0x25eb: 0x00c0, + // Block 0x98, offset 0x2600 + 0x2600: 0x00c0, 0x2601: 0x00c0, 0x2602: 0x00c0, 0x2603: 0x00c0, 0x2604: 0x00c0, 0x2605: 0x00c0, + 0x2606: 0x00c0, 0x2607: 0x00c0, 0x2608: 0x00c0, 0x2609: 0x00c0, 0x260a: 0x00c0, 0x260b: 0x00c0, + 0x260c: 0x00c0, 0x260d: 0x00c0, 0x260e: 0x00c0, 0x260f: 0x00c0, 0x2610: 0x00c0, 0x2611: 0x00c0, + 0x2612: 0x00c0, 0x2613: 0x00c0, 0x2614: 0x00c0, 0x2615: 0x00c0, 0x2616: 0x00c0, 0x2617: 0x00c0, + 0x2618: 0x00c0, 0x2619: 0x00c0, 0x261a: 0x00c0, 0x261b: 0x00c0, 0x261c: 0x00c0, 0x261d: 0x00c0, + 0x261e: 0x00c0, 0x261f: 0x00c0, 0x2620: 0x00c0, 0x2621: 0x00c0, 0x2622: 0x00c0, 0x2623: 0x00c0, + 0x2624: 0x00c0, 0x2625: 0x00c0, 0x2626: 0x00c0, 0x2627: 0x00c0, 
0x2628: 0x00c0, 0x2629: 0x00c0, + 0x262a: 0x00c0, 0x262b: 0x00c0, 0x262c: 0x00c0, 0x262d: 0x00c0, 0x262e: 0x00c0, 0x262f: 0x00c3, + 0x2630: 0x0083, 0x2631: 0x0083, 0x2632: 0x0083, 0x2633: 0x0080, 0x2634: 0x00c3, 0x2635: 0x00c3, + 0x2636: 0x00c3, 0x2637: 0x00c3, 0x2638: 0x00c3, 0x2639: 0x00c3, 0x263a: 0x00c3, 0x263b: 0x00c3, + 0x263c: 0x00c3, 0x263d: 0x00c3, 0x263e: 0x0080, 0x263f: 0x00c0, + // Block 0x99, offset 0x2640 + 0x2640: 0x00c0, 0x2641: 0x00c0, 0x2642: 0x00c0, 0x2643: 0x00c0, 0x2644: 0x00c0, 0x2645: 0x00c0, + 0x2646: 0x00c0, 0x2647: 0x00c0, 0x2648: 0x00c0, 0x2649: 0x00c0, 0x264a: 0x00c0, 0x264b: 0x00c0, + 0x264c: 0x00c0, 0x264d: 0x00c0, 0x264e: 0x00c0, 0x264f: 0x00c0, 0x2650: 0x00c0, 0x2651: 0x00c0, + 0x2652: 0x00c0, 0x2653: 0x00c0, 0x2654: 0x00c0, 0x2655: 0x00c0, 0x2656: 0x00c0, 0x2657: 0x00c0, + 0x2658: 0x00c0, 0x2659: 0x00c0, 0x265a: 0x00c0, 0x265b: 0x00c0, 0x265c: 0x0080, 0x265d: 0x0080, + 0x265e: 0x00c3, 0x265f: 0x00c3, 0x2660: 0x00c0, 0x2661: 0x00c0, 0x2662: 0x00c0, 0x2663: 0x00c0, + 0x2664: 0x00c0, 0x2665: 0x00c0, 0x2666: 0x00c0, 0x2667: 0x00c0, 0x2668: 0x00c0, 0x2669: 0x00c0, + 0x266a: 0x00c0, 0x266b: 0x00c0, 0x266c: 0x00c0, 0x266d: 0x00c0, 0x266e: 0x00c0, 0x266f: 0x00c0, + 0x2670: 0x00c0, 0x2671: 0x00c0, 0x2672: 0x00c0, 0x2673: 0x00c0, 0x2674: 0x00c0, 0x2675: 0x00c0, + 0x2676: 0x00c0, 0x2677: 0x00c0, 0x2678: 0x00c0, 0x2679: 0x00c0, 0x267a: 0x00c0, 0x267b: 0x00c0, + 0x267c: 0x00c0, 0x267d: 0x00c0, 0x267e: 0x00c0, 0x267f: 0x00c0, + // Block 0x9a, offset 0x2680 + 0x2680: 0x00c0, 0x2681: 0x00c0, 0x2682: 0x00c0, 0x2683: 0x00c0, 0x2684: 0x00c0, 0x2685: 0x00c0, + 0x2686: 0x00c0, 0x2687: 0x00c0, 0x2688: 0x00c0, 0x2689: 0x00c0, 0x268a: 0x00c0, 0x268b: 0x00c0, + 0x268c: 0x00c0, 0x268d: 0x00c0, 0x268e: 0x00c0, 0x268f: 0x00c0, 0x2690: 0x00c0, 0x2691: 0x00c0, + 0x2692: 0x00c0, 0x2693: 0x00c0, 0x2694: 0x00c0, 0x2695: 0x00c0, 0x2696: 0x00c0, 0x2697: 0x00c0, + 0x2698: 0x00c0, 0x2699: 0x00c0, 0x269a: 0x00c0, 0x269b: 0x00c0, 0x269c: 0x00c0, 0x269d: 0x00c0, + 0x269e: 0x00c0, 0x269f: 0x00c0, 0x26a0: 0x00c0, 0x26a1: 0x00c0, 0x26a2: 0x00c0, 0x26a3: 0x00c0, + 0x26a4: 0x00c0, 0x26a5: 0x00c0, 0x26a6: 0x0080, 0x26a7: 0x0080, 0x26a8: 0x0080, 0x26a9: 0x0080, + 0x26aa: 0x0080, 0x26ab: 0x0080, 0x26ac: 0x0080, 0x26ad: 0x0080, 0x26ae: 0x0080, 0x26af: 0x0080, + 0x26b0: 0x00c3, 0x26b1: 0x00c3, 0x26b2: 0x0080, 0x26b3: 0x0080, 0x26b4: 0x0080, 0x26b5: 0x0080, + 0x26b6: 0x0080, 0x26b7: 0x0080, + // Block 0x9b, offset 0x26c0 + 0x26c0: 0x0080, 0x26c1: 0x0080, 0x26c2: 0x0080, 0x26c3: 0x0080, 0x26c4: 0x0080, 0x26c5: 0x0080, + 0x26c6: 0x0080, 0x26c7: 0x0080, 0x26c8: 0x0080, 0x26c9: 0x0080, 0x26ca: 0x0080, 0x26cb: 0x0080, + 0x26cc: 0x0080, 0x26cd: 0x0080, 0x26ce: 0x0080, 0x26cf: 0x0080, 0x26d0: 0x0080, 0x26d1: 0x0080, + 0x26d2: 0x0080, 0x26d3: 0x0080, 0x26d4: 0x0080, 0x26d5: 0x0080, 0x26d6: 0x0080, 0x26d7: 0x00c0, + 0x26d8: 0x00c0, 0x26d9: 0x00c0, 0x26da: 0x00c0, 0x26db: 0x00c0, 0x26dc: 0x00c0, 0x26dd: 0x00c0, + 0x26de: 0x00c0, 0x26df: 0x00c0, 0x26e0: 0x0080, 0x26e1: 0x0080, 0x26e2: 0x00c0, 0x26e3: 0x00c0, + 0x26e4: 0x00c0, 0x26e5: 0x00c0, 0x26e6: 0x00c0, 0x26e7: 0x00c0, 0x26e8: 0x00c0, 0x26e9: 0x00c0, + 0x26ea: 0x00c0, 0x26eb: 0x00c0, 0x26ec: 0x00c0, 0x26ed: 0x00c0, 0x26ee: 0x00c0, 0x26ef: 0x00c0, + 0x26f0: 0x00c0, 0x26f1: 0x00c0, 0x26f2: 0x00c0, 0x26f3: 0x00c0, 0x26f4: 0x00c0, 0x26f5: 0x00c0, + 0x26f6: 0x00c0, 0x26f7: 0x00c0, 0x26f8: 0x00c0, 0x26f9: 0x00c0, 0x26fa: 0x00c0, 0x26fb: 0x00c0, + 0x26fc: 0x00c0, 0x26fd: 0x00c0, 0x26fe: 0x00c0, 0x26ff: 0x00c0, + // Block 0x9c, offset 0x2700 + 0x2700: 0x00c0, 0x2701: 
0x00c0, 0x2702: 0x00c0, 0x2703: 0x00c0, 0x2704: 0x00c0, 0x2705: 0x00c0, + 0x2706: 0x00c0, 0x2707: 0x00c0, 0x2708: 0x00c0, 0x2709: 0x00c0, 0x270a: 0x00c0, 0x270b: 0x00c0, + 0x270c: 0x00c0, 0x270d: 0x00c0, 0x270e: 0x00c0, 0x270f: 0x00c0, 0x2710: 0x00c0, 0x2711: 0x00c0, + 0x2712: 0x00c0, 0x2713: 0x00c0, 0x2714: 0x00c0, 0x2715: 0x00c0, 0x2716: 0x00c0, 0x2717: 0x00c0, + 0x2718: 0x00c0, 0x2719: 0x00c0, 0x271a: 0x00c0, 0x271b: 0x00c0, 0x271c: 0x00c0, 0x271d: 0x00c0, + 0x271e: 0x00c0, 0x271f: 0x00c0, 0x2720: 0x00c0, 0x2721: 0x00c0, 0x2722: 0x00c0, 0x2723: 0x00c0, + 0x2724: 0x00c0, 0x2725: 0x00c0, 0x2726: 0x00c0, 0x2727: 0x00c0, 0x2728: 0x00c0, 0x2729: 0x00c0, + 0x272a: 0x00c0, 0x272b: 0x00c0, 0x272c: 0x00c0, 0x272d: 0x00c0, 0x272e: 0x00c0, 0x272f: 0x00c0, + 0x2730: 0x0080, 0x2731: 0x00c0, 0x2732: 0x00c0, 0x2733: 0x00c0, 0x2734: 0x00c0, 0x2735: 0x00c0, + 0x2736: 0x00c0, 0x2737: 0x00c0, 0x2738: 0x00c0, 0x2739: 0x00c0, 0x273a: 0x00c0, 0x273b: 0x00c0, + 0x273c: 0x00c0, 0x273d: 0x00c0, 0x273e: 0x00c0, 0x273f: 0x00c0, + // Block 0x9d, offset 0x2740 + 0x2740: 0x00c0, 0x2741: 0x00c0, 0x2742: 0x00c0, 0x2743: 0x00c0, 0x2744: 0x00c0, 0x2745: 0x00c0, + 0x2746: 0x00c0, 0x2747: 0x00c0, 0x2748: 0x00c0, 0x2749: 0x0080, 0x274a: 0x0080, 0x274b: 0x00c0, + 0x274c: 0x00c0, 0x274d: 0x00c0, 0x274e: 0x00c0, 0x274f: 0x00c0, 0x2750: 0x00c0, 0x2751: 0x00c0, + 0x2752: 0x00c0, 0x2753: 0x00c0, 0x2754: 0x00c0, 0x2755: 0x00c0, 0x2756: 0x00c0, 0x2757: 0x00c0, + 0x2758: 0x00c0, 0x2759: 0x00c0, 0x275a: 0x00c0, 0x275b: 0x00c0, 0x275c: 0x00c0, 0x275d: 0x00c0, + 0x275e: 0x00c0, 0x275f: 0x00c0, 0x2760: 0x00c0, 0x2761: 0x00c0, 0x2762: 0x00c0, 0x2763: 0x00c0, + 0x2764: 0x00c0, 0x2765: 0x00c0, 0x2766: 0x00c0, 0x2767: 0x00c0, 0x2768: 0x00c0, 0x2769: 0x00c0, + 0x276a: 0x00c0, 0x276b: 0x00c0, 0x276c: 0x00c0, 0x276d: 0x00c0, 0x276e: 0x00c0, + 0x2770: 0x00c0, 0x2771: 0x00c0, 0x2772: 0x00c0, 0x2773: 0x00c0, 0x2774: 0x00c0, 0x2775: 0x00c0, + 0x2776: 0x00c0, 0x2777: 0x00c0, + // Block 0x9e, offset 0x2780 + 0x27b7: 0x00c0, 0x27b8: 0x0080, 0x27b9: 0x0080, 0x27ba: 0x00c0, 0x27bb: 0x00c0, + 0x27bc: 0x00c0, 0x27bd: 0x00c0, 0x27be: 0x00c0, 0x27bf: 0x00c0, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x00c0, 0x27c1: 0x00c0, 0x27c2: 0x00c3, 0x27c3: 0x00c0, 0x27c4: 0x00c0, 0x27c5: 0x00c0, + 0x27c6: 0x00c6, 0x27c7: 0x00c0, 0x27c8: 0x00c0, 0x27c9: 0x00c0, 0x27ca: 0x00c0, 0x27cb: 0x00c3, + 0x27cc: 0x00c0, 0x27cd: 0x00c0, 0x27ce: 0x00c0, 0x27cf: 0x00c0, 0x27d0: 0x00c0, 0x27d1: 0x00c0, + 0x27d2: 0x00c0, 0x27d3: 0x00c0, 0x27d4: 0x00c0, 0x27d5: 0x00c0, 0x27d6: 0x00c0, 0x27d7: 0x00c0, + 0x27d8: 0x00c0, 0x27d9: 0x00c0, 0x27da: 0x00c0, 0x27db: 0x00c0, 0x27dc: 0x00c0, 0x27dd: 0x00c0, + 0x27de: 0x00c0, 0x27df: 0x00c0, 0x27e0: 0x00c0, 0x27e1: 0x00c0, 0x27e2: 0x00c0, 0x27e3: 0x00c0, + 0x27e4: 0x00c0, 0x27e5: 0x00c3, 0x27e6: 0x00c3, 0x27e7: 0x00c0, 0x27e8: 0x0080, 0x27e9: 0x0080, + 0x27ea: 0x0080, 0x27eb: 0x0080, + 0x27f0: 0x0080, 0x27f1: 0x0080, 0x27f2: 0x0080, 0x27f3: 0x0080, 0x27f4: 0x0080, 0x27f5: 0x0080, + 0x27f6: 0x0080, 0x27f7: 0x0080, 0x27f8: 0x0080, 0x27f9: 0x0080, + // Block 0xa0, offset 0x2800 + 0x2800: 0x00c2, 0x2801: 0x00c2, 0x2802: 0x00c2, 0x2803: 0x00c2, 0x2804: 0x00c2, 0x2805: 0x00c2, + 0x2806: 0x00c2, 0x2807: 0x00c2, 0x2808: 0x00c2, 0x2809: 0x00c2, 0x280a: 0x00c2, 0x280b: 0x00c2, + 0x280c: 0x00c2, 0x280d: 0x00c2, 0x280e: 0x00c2, 0x280f: 0x00c2, 0x2810: 0x00c2, 0x2811: 0x00c2, + 0x2812: 0x00c2, 0x2813: 0x00c2, 0x2814: 0x00c2, 0x2815: 0x00c2, 0x2816: 0x00c2, 0x2817: 0x00c2, + 0x2818: 0x00c2, 0x2819: 0x00c2, 0x281a: 0x00c2, 0x281b: 0x00c2, 0x281c: 0x00c2, 
0x281d: 0x00c2, + 0x281e: 0x00c2, 0x281f: 0x00c2, 0x2820: 0x00c2, 0x2821: 0x00c2, 0x2822: 0x00c2, 0x2823: 0x00c2, + 0x2824: 0x00c2, 0x2825: 0x00c2, 0x2826: 0x00c2, 0x2827: 0x00c2, 0x2828: 0x00c2, 0x2829: 0x00c2, + 0x282a: 0x00c2, 0x282b: 0x00c2, 0x282c: 0x00c2, 0x282d: 0x00c2, 0x282e: 0x00c2, 0x282f: 0x00c2, + 0x2830: 0x00c2, 0x2831: 0x00c2, 0x2832: 0x00c1, 0x2833: 0x00c0, 0x2834: 0x0080, 0x2835: 0x0080, + 0x2836: 0x0080, 0x2837: 0x0080, + // Block 0xa1, offset 0x2840 + 0x2840: 0x00c0, 0x2841: 0x00c0, 0x2842: 0x00c0, 0x2843: 0x00c0, 0x2844: 0x00c6, 0x2845: 0x00c3, + 0x284e: 0x0080, 0x284f: 0x0080, 0x2850: 0x00c0, 0x2851: 0x00c0, + 0x2852: 0x00c0, 0x2853: 0x00c0, 0x2854: 0x00c0, 0x2855: 0x00c0, 0x2856: 0x00c0, 0x2857: 0x00c0, + 0x2858: 0x00c0, 0x2859: 0x00c0, + 0x2860: 0x00c3, 0x2861: 0x00c3, 0x2862: 0x00c3, 0x2863: 0x00c3, + 0x2864: 0x00c3, 0x2865: 0x00c3, 0x2866: 0x00c3, 0x2867: 0x00c3, 0x2868: 0x00c3, 0x2869: 0x00c3, + 0x286a: 0x00c3, 0x286b: 0x00c3, 0x286c: 0x00c3, 0x286d: 0x00c3, 0x286e: 0x00c3, 0x286f: 0x00c3, + 0x2870: 0x00c3, 0x2871: 0x00c3, 0x2872: 0x00c0, 0x2873: 0x00c0, 0x2874: 0x00c0, 0x2875: 0x00c0, + 0x2876: 0x00c0, 0x2877: 0x00c0, 0x2878: 0x0080, 0x2879: 0x0080, 0x287a: 0x0080, 0x287b: 0x00c0, + 0x287c: 0x0080, 0x287d: 0x00c0, + // Block 0xa2, offset 0x2880 + 0x2880: 0x00c0, 0x2881: 0x00c0, 0x2882: 0x00c0, 0x2883: 0x00c0, 0x2884: 0x00c0, 0x2885: 0x00c0, + 0x2886: 0x00c0, 0x2887: 0x00c0, 0x2888: 0x00c0, 0x2889: 0x00c0, 0x288a: 0x00c0, 0x288b: 0x00c0, + 0x288c: 0x00c0, 0x288d: 0x00c0, 0x288e: 0x00c0, 0x288f: 0x00c0, 0x2890: 0x00c0, 0x2891: 0x00c0, + 0x2892: 0x00c0, 0x2893: 0x00c0, 0x2894: 0x00c0, 0x2895: 0x00c0, 0x2896: 0x00c0, 0x2897: 0x00c0, + 0x2898: 0x00c0, 0x2899: 0x00c0, 0x289a: 0x00c0, 0x289b: 0x00c0, 0x289c: 0x00c0, 0x289d: 0x00c0, + 0x289e: 0x00c0, 0x289f: 0x00c0, 0x28a0: 0x00c0, 0x28a1: 0x00c0, 0x28a2: 0x00c0, 0x28a3: 0x00c0, + 0x28a4: 0x00c0, 0x28a5: 0x00c0, 0x28a6: 0x00c3, 0x28a7: 0x00c3, 0x28a8: 0x00c3, 0x28a9: 0x00c3, + 0x28aa: 0x00c3, 0x28ab: 0x00c3, 0x28ac: 0x00c3, 0x28ad: 0x00c3, 0x28ae: 0x0080, 0x28af: 0x0080, + 0x28b0: 0x00c0, 0x28b1: 0x00c0, 0x28b2: 0x00c0, 0x28b3: 0x00c0, 0x28b4: 0x00c0, 0x28b5: 0x00c0, + 0x28b6: 0x00c0, 0x28b7: 0x00c0, 0x28b8: 0x00c0, 0x28b9: 0x00c0, 0x28ba: 0x00c0, 0x28bb: 0x00c0, + 0x28bc: 0x00c0, 0x28bd: 0x00c0, 0x28be: 0x00c0, 0x28bf: 0x00c0, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x00c0, 0x28c1: 0x00c0, 0x28c2: 0x00c0, 0x28c3: 0x00c0, 0x28c4: 0x00c0, 0x28c5: 0x00c0, + 0x28c6: 0x00c0, 0x28c7: 0x00c3, 0x28c8: 0x00c3, 0x28c9: 0x00c3, 0x28ca: 0x00c3, 0x28cb: 0x00c3, + 0x28cc: 0x00c3, 0x28cd: 0x00c3, 0x28ce: 0x00c3, 0x28cf: 0x00c3, 0x28d0: 0x00c3, 0x28d1: 0x00c3, + 0x28d2: 0x00c0, 0x28d3: 0x00c5, + 0x28df: 0x0080, 0x28e0: 0x0040, 0x28e1: 0x0040, 0x28e2: 0x0040, 0x28e3: 0x0040, + 0x28e4: 0x0040, 0x28e5: 0x0040, 0x28e6: 0x0040, 0x28e7: 0x0040, 0x28e8: 0x0040, 0x28e9: 0x0040, + 0x28ea: 0x0040, 0x28eb: 0x0040, 0x28ec: 0x0040, 0x28ed: 0x0040, 0x28ee: 0x0040, 0x28ef: 0x0040, + 0x28f0: 0x0040, 0x28f1: 0x0040, 0x28f2: 0x0040, 0x28f3: 0x0040, 0x28f4: 0x0040, 0x28f5: 0x0040, + 0x28f6: 0x0040, 0x28f7: 0x0040, 0x28f8: 0x0040, 0x28f9: 0x0040, 0x28fa: 0x0040, 0x28fb: 0x0040, + 0x28fc: 0x0040, + // Block 0xa4, offset 0x2900 + 0x2900: 0x00c3, 0x2901: 0x00c3, 0x2902: 0x00c3, 0x2903: 0x00c0, 0x2904: 0x00c0, 0x2905: 0x00c0, + 0x2906: 0x00c0, 0x2907: 0x00c0, 0x2908: 0x00c0, 0x2909: 0x00c0, 0x290a: 0x00c0, 0x290b: 0x00c0, + 0x290c: 0x00c0, 0x290d: 0x00c0, 0x290e: 0x00c0, 0x290f: 0x00c0, 0x2910: 0x00c0, 0x2911: 0x00c0, + 0x2912: 0x00c0, 0x2913: 0x00c0, 
0x2914: 0x00c0, 0x2915: 0x00c0, 0x2916: 0x00c0, 0x2917: 0x00c0, + 0x2918: 0x00c0, 0x2919: 0x00c0, 0x291a: 0x00c0, 0x291b: 0x00c0, 0x291c: 0x00c0, 0x291d: 0x00c0, + 0x291e: 0x00c0, 0x291f: 0x00c0, 0x2920: 0x00c0, 0x2921: 0x00c0, 0x2922: 0x00c0, 0x2923: 0x00c0, + 0x2924: 0x00c0, 0x2925: 0x00c0, 0x2926: 0x00c0, 0x2927: 0x00c0, 0x2928: 0x00c0, 0x2929: 0x00c0, + 0x292a: 0x00c0, 0x292b: 0x00c0, 0x292c: 0x00c0, 0x292d: 0x00c0, 0x292e: 0x00c0, 0x292f: 0x00c0, + 0x2930: 0x00c0, 0x2931: 0x00c0, 0x2932: 0x00c0, 0x2933: 0x00c3, 0x2934: 0x00c0, 0x2935: 0x00c0, + 0x2936: 0x00c3, 0x2937: 0x00c3, 0x2938: 0x00c3, 0x2939: 0x00c3, 0x293a: 0x00c0, 0x293b: 0x00c0, + 0x293c: 0x00c3, 0x293d: 0x00c0, 0x293e: 0x00c0, 0x293f: 0x00c0, + // Block 0xa5, offset 0x2940 + 0x2940: 0x00c5, 0x2941: 0x0080, 0x2942: 0x0080, 0x2943: 0x0080, 0x2944: 0x0080, 0x2945: 0x0080, + 0x2946: 0x0080, 0x2947: 0x0080, 0x2948: 0x0080, 0x2949: 0x0080, 0x294a: 0x0080, 0x294b: 0x0080, + 0x294c: 0x0080, 0x294d: 0x0080, 0x294f: 0x00c0, 0x2950: 0x00c0, 0x2951: 0x00c0, + 0x2952: 0x00c0, 0x2953: 0x00c0, 0x2954: 0x00c0, 0x2955: 0x00c0, 0x2956: 0x00c0, 0x2957: 0x00c0, + 0x2958: 0x00c0, 0x2959: 0x00c0, + 0x295e: 0x0080, 0x295f: 0x0080, 0x2960: 0x00c0, 0x2961: 0x00c0, 0x2962: 0x00c0, 0x2963: 0x00c0, + 0x2964: 0x00c0, 0x2965: 0x00c3, 0x2966: 0x00c0, 0x2967: 0x00c0, 0x2968: 0x00c0, 0x2969: 0x00c0, + 0x296a: 0x00c0, 0x296b: 0x00c0, 0x296c: 0x00c0, 0x296d: 0x00c0, 0x296e: 0x00c0, 0x296f: 0x00c0, + 0x2970: 0x00c0, 0x2971: 0x00c0, 0x2972: 0x00c0, 0x2973: 0x00c0, 0x2974: 0x00c0, 0x2975: 0x00c0, + 0x2976: 0x00c0, 0x2977: 0x00c0, 0x2978: 0x00c0, 0x2979: 0x00c0, 0x297a: 0x00c0, 0x297b: 0x00c0, + 0x297c: 0x00c0, 0x297d: 0x00c0, 0x297e: 0x00c0, + // Block 0xa6, offset 0x2980 + 0x2980: 0x00c0, 0x2981: 0x00c0, 0x2982: 0x00c0, 0x2983: 0x00c0, 0x2984: 0x00c0, 0x2985: 0x00c0, + 0x2986: 0x00c0, 0x2987: 0x00c0, 0x2988: 0x00c0, 0x2989: 0x00c0, 0x298a: 0x00c0, 0x298b: 0x00c0, + 0x298c: 0x00c0, 0x298d: 0x00c0, 0x298e: 0x00c0, 0x298f: 0x00c0, 0x2990: 0x00c0, 0x2991: 0x00c0, + 0x2992: 0x00c0, 0x2993: 0x00c0, 0x2994: 0x00c0, 0x2995: 0x00c0, 0x2996: 0x00c0, 0x2997: 0x00c0, + 0x2998: 0x00c0, 0x2999: 0x00c0, 0x299a: 0x00c0, 0x299b: 0x00c0, 0x299c: 0x00c0, 0x299d: 0x00c0, + 0x299e: 0x00c0, 0x299f: 0x00c0, 0x29a0: 0x00c0, 0x29a1: 0x00c0, 0x29a2: 0x00c0, 0x29a3: 0x00c0, + 0x29a4: 0x00c0, 0x29a5: 0x00c0, 0x29a6: 0x00c0, 0x29a7: 0x00c0, 0x29a8: 0x00c0, 0x29a9: 0x00c3, + 0x29aa: 0x00c3, 0x29ab: 0x00c3, 0x29ac: 0x00c3, 0x29ad: 0x00c3, 0x29ae: 0x00c3, 0x29af: 0x00c0, + 0x29b0: 0x00c0, 0x29b1: 0x00c3, 0x29b2: 0x00c3, 0x29b3: 0x00c0, 0x29b4: 0x00c0, 0x29b5: 0x00c3, + 0x29b6: 0x00c3, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x00c0, 0x29c1: 0x00c0, 0x29c2: 0x00c0, 0x29c3: 0x00c3, 0x29c4: 0x00c0, 0x29c5: 0x00c0, + 0x29c6: 0x00c0, 0x29c7: 0x00c0, 0x29c8: 0x00c0, 0x29c9: 0x00c0, 0x29ca: 0x00c0, 0x29cb: 0x00c0, + 0x29cc: 0x00c3, 0x29cd: 0x00c0, 0x29d0: 0x00c0, 0x29d1: 0x00c0, + 0x29d2: 0x00c0, 0x29d3: 0x00c0, 0x29d4: 0x00c0, 0x29d5: 0x00c0, 0x29d6: 0x00c0, 0x29d7: 0x00c0, + 0x29d8: 0x00c0, 0x29d9: 0x00c0, 0x29dc: 0x0080, 0x29dd: 0x0080, + 0x29de: 0x0080, 0x29df: 0x0080, 0x29e0: 0x00c0, 0x29e1: 0x00c0, 0x29e2: 0x00c0, 0x29e3: 0x00c0, + 0x29e4: 0x00c0, 0x29e5: 0x00c0, 0x29e6: 0x00c0, 0x29e7: 0x00c0, 0x29e8: 0x00c0, 0x29e9: 0x00c0, + 0x29ea: 0x00c0, 0x29eb: 0x00c0, 0x29ec: 0x00c0, 0x29ed: 0x00c0, 0x29ee: 0x00c0, 0x29ef: 0x00c0, + 0x29f0: 0x00c0, 0x29f1: 0x00c0, 0x29f2: 0x00c0, 0x29f3: 0x00c0, 0x29f4: 0x00c0, 0x29f5: 0x00c0, + 0x29f6: 0x00c0, 0x29f7: 0x0080, 0x29f8: 0x0080, 0x29f9: 0x0080, 0x29fa: 
0x00c0, 0x29fb: 0x00c0, + 0x29fc: 0x00c3, 0x29fd: 0x00c0, 0x29fe: 0x00c0, 0x29ff: 0x00c0, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x00c0, 0x2a01: 0x00c0, 0x2a02: 0x00c0, 0x2a03: 0x00c0, 0x2a04: 0x00c0, 0x2a05: 0x00c0, + 0x2a06: 0x00c0, 0x2a07: 0x00c0, 0x2a08: 0x00c0, 0x2a09: 0x00c0, 0x2a0a: 0x00c0, 0x2a0b: 0x00c0, + 0x2a0c: 0x00c0, 0x2a0d: 0x00c0, 0x2a0e: 0x00c0, 0x2a0f: 0x00c0, 0x2a10: 0x00c0, 0x2a11: 0x00c0, + 0x2a12: 0x00c0, 0x2a13: 0x00c0, 0x2a14: 0x00c0, 0x2a15: 0x00c0, 0x2a16: 0x00c0, 0x2a17: 0x00c0, + 0x2a18: 0x00c0, 0x2a19: 0x00c0, 0x2a1a: 0x00c0, 0x2a1b: 0x00c0, 0x2a1c: 0x00c0, 0x2a1d: 0x00c0, + 0x2a1e: 0x00c0, 0x2a1f: 0x00c0, 0x2a20: 0x00c0, 0x2a21: 0x00c0, 0x2a22: 0x00c0, 0x2a23: 0x00c0, + 0x2a24: 0x00c0, 0x2a25: 0x00c0, 0x2a26: 0x00c0, 0x2a27: 0x00c0, 0x2a28: 0x00c0, 0x2a29: 0x00c0, + 0x2a2a: 0x00c0, 0x2a2b: 0x00c0, 0x2a2c: 0x00c0, 0x2a2d: 0x00c0, 0x2a2e: 0x00c0, 0x2a2f: 0x00c0, + 0x2a30: 0x00c3, 0x2a31: 0x00c0, 0x2a32: 0x00c3, 0x2a33: 0x00c3, 0x2a34: 0x00c3, 0x2a35: 0x00c0, + 0x2a36: 0x00c0, 0x2a37: 0x00c3, 0x2a38: 0x00c3, 0x2a39: 0x00c0, 0x2a3a: 0x00c0, 0x2a3b: 0x00c0, + 0x2a3c: 0x00c0, 0x2a3d: 0x00c0, 0x2a3e: 0x00c3, 0x2a3f: 0x00c3, + // Block 0xa9, offset 0x2a40 + 0x2a40: 0x00c0, 0x2a41: 0x00c3, 0x2a42: 0x00c0, + 0x2a5b: 0x00c0, 0x2a5c: 0x00c0, 0x2a5d: 0x00c0, + 0x2a5e: 0x0080, 0x2a5f: 0x0080, 0x2a60: 0x00c0, 0x2a61: 0x00c0, 0x2a62: 0x00c0, 0x2a63: 0x00c0, + 0x2a64: 0x00c0, 0x2a65: 0x00c0, 0x2a66: 0x00c0, 0x2a67: 0x00c0, 0x2a68: 0x00c0, 0x2a69: 0x00c0, + 0x2a6a: 0x00c0, 0x2a6b: 0x00c0, 0x2a6c: 0x00c3, 0x2a6d: 0x00c3, 0x2a6e: 0x00c0, 0x2a6f: 0x00c0, + 0x2a70: 0x0080, 0x2a71: 0x0080, 0x2a72: 0x00c0, 0x2a73: 0x00c0, 0x2a74: 0x00c0, 0x2a75: 0x00c0, + 0x2a76: 0x00c6, + // Block 0xaa, offset 0x2a80 + 0x2a81: 0x00c0, 0x2a82: 0x00c0, 0x2a83: 0x00c0, 0x2a84: 0x00c0, 0x2a85: 0x00c0, + 0x2a86: 0x00c0, 0x2a89: 0x00c0, 0x2a8a: 0x00c0, 0x2a8b: 0x00c0, + 0x2a8c: 0x00c0, 0x2a8d: 0x00c0, 0x2a8e: 0x00c0, 0x2a91: 0x00c0, + 0x2a92: 0x00c0, 0x2a93: 0x00c0, 0x2a94: 0x00c0, 0x2a95: 0x00c0, 0x2a96: 0x00c0, + 0x2aa0: 0x00c0, 0x2aa1: 0x00c0, 0x2aa2: 0x00c0, 0x2aa3: 0x00c0, + 0x2aa4: 0x00c0, 0x2aa5: 0x00c0, 0x2aa6: 0x00c0, 0x2aa8: 0x00c0, 0x2aa9: 0x00c0, + 0x2aaa: 0x00c0, 0x2aab: 0x00c0, 0x2aac: 0x00c0, 0x2aad: 0x00c0, 0x2aae: 0x00c0, + 0x2ab0: 0x00c0, 0x2ab1: 0x00c0, 0x2ab2: 0x00c0, 0x2ab3: 0x00c0, 0x2ab4: 0x00c0, 0x2ab5: 0x00c0, + 0x2ab6: 0x00c0, 0x2ab7: 0x00c0, 0x2ab8: 0x00c0, 0x2ab9: 0x00c0, 0x2aba: 0x00c0, 0x2abb: 0x00c0, + 0x2abc: 0x00c0, 0x2abd: 0x00c0, 0x2abe: 0x00c0, 0x2abf: 0x00c0, + // Block 0xab, offset 0x2ac0 + 0x2ac0: 0x00c0, 0x2ac1: 0x00c0, 0x2ac2: 0x00c0, 0x2ac3: 0x00c0, 0x2ac4: 0x00c0, 0x2ac5: 0x00c0, + 0x2ac6: 0x00c0, 0x2ac7: 0x00c0, 0x2ac8: 0x00c0, 0x2ac9: 0x00c0, 0x2aca: 0x00c0, 0x2acb: 0x00c0, + 0x2acc: 0x00c0, 0x2acd: 0x00c0, 0x2ace: 0x00c0, 0x2acf: 0x00c0, 0x2ad0: 0x00c0, 0x2ad1: 0x00c0, + 0x2ad2: 0x00c0, 0x2ad3: 0x00c0, 0x2ad4: 0x00c0, 0x2ad5: 0x00c0, 0x2ad6: 0x00c0, 0x2ad7: 0x00c0, + 0x2ad8: 0x00c0, 0x2ad9: 0x00c0, 0x2ada: 0x00c0, 0x2adb: 0x0080, 0x2adc: 0x0080, 0x2add: 0x0080, + 0x2ade: 0x0080, 0x2adf: 0x0080, 0x2ae0: 0x00c0, 0x2ae1: 0x00c0, 0x2ae2: 0x00c0, 0x2ae3: 0x00c0, + 0x2ae4: 0x00c0, 0x2ae5: 0x00c8, + 0x2af0: 0x00c0, 0x2af1: 0x00c0, 0x2af2: 0x00c0, 0x2af3: 0x00c0, 0x2af4: 0x00c0, 0x2af5: 0x00c0, + 0x2af6: 0x00c0, 0x2af7: 0x00c0, 0x2af8: 0x00c0, 0x2af9: 0x00c0, 0x2afa: 0x00c0, 0x2afb: 0x00c0, + 0x2afc: 0x00c0, 0x2afd: 0x00c0, 0x2afe: 0x00c0, 0x2aff: 0x00c0, + // Block 0xac, offset 0x2b00 + 0x2b00: 0x00c0, 0x2b01: 0x00c0, 0x2b02: 0x00c0, 0x2b03: 0x00c0, 0x2b04: 0x00c0, 
0x2b05: 0x00c0, + 0x2b06: 0x00c0, 0x2b07: 0x00c0, 0x2b08: 0x00c0, 0x2b09: 0x00c0, 0x2b0a: 0x00c0, 0x2b0b: 0x00c0, + 0x2b0c: 0x00c0, 0x2b0d: 0x00c0, 0x2b0e: 0x00c0, 0x2b0f: 0x00c0, 0x2b10: 0x00c0, 0x2b11: 0x00c0, + 0x2b12: 0x00c0, 0x2b13: 0x00c0, 0x2b14: 0x00c0, 0x2b15: 0x00c0, 0x2b16: 0x00c0, 0x2b17: 0x00c0, + 0x2b18: 0x00c0, 0x2b19: 0x00c0, 0x2b1a: 0x00c0, 0x2b1b: 0x00c0, 0x2b1c: 0x00c0, 0x2b1d: 0x00c0, + 0x2b1e: 0x00c0, 0x2b1f: 0x00c0, 0x2b20: 0x00c0, 0x2b21: 0x00c0, 0x2b22: 0x00c0, 0x2b23: 0x00c0, + 0x2b24: 0x00c0, 0x2b25: 0x00c3, 0x2b26: 0x00c0, 0x2b27: 0x00c0, 0x2b28: 0x00c3, 0x2b29: 0x00c0, + 0x2b2a: 0x00c0, 0x2b2b: 0x0080, 0x2b2c: 0x00c0, 0x2b2d: 0x00c6, + 0x2b30: 0x00c0, 0x2b31: 0x00c0, 0x2b32: 0x00c0, 0x2b33: 0x00c0, 0x2b34: 0x00c0, 0x2b35: 0x00c0, + 0x2b36: 0x00c0, 0x2b37: 0x00c0, 0x2b38: 0x00c0, 0x2b39: 0x00c0, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x00c0, 0x2b41: 0x00c0, 0x2b42: 0x00c0, 0x2b43: 0x00c0, 0x2b44: 0x00c0, 0x2b45: 0x00c0, + 0x2b46: 0x00c0, 0x2b47: 0x00c0, 0x2b48: 0x00c0, 0x2b49: 0x00c0, 0x2b4a: 0x00c0, 0x2b4b: 0x00c0, + 0x2b4c: 0x00c0, 0x2b4d: 0x00c0, 0x2b4e: 0x00c0, 0x2b4f: 0x00c0, 0x2b50: 0x00c0, 0x2b51: 0x00c0, + 0x2b52: 0x00c0, 0x2b53: 0x00c0, 0x2b54: 0x00c0, 0x2b55: 0x00c0, 0x2b56: 0x00c0, 0x2b57: 0x00c0, + 0x2b58: 0x00c0, 0x2b59: 0x00c0, 0x2b5a: 0x00c0, 0x2b5b: 0x00c0, 0x2b5c: 0x00c0, 0x2b5d: 0x00c0, + 0x2b5e: 0x00c0, 0x2b5f: 0x00c0, 0x2b60: 0x00c0, 0x2b61: 0x00c0, 0x2b62: 0x00c0, 0x2b63: 0x00c0, + 0x2b70: 0x0040, 0x2b71: 0x0040, 0x2b72: 0x0040, 0x2b73: 0x0040, 0x2b74: 0x0040, 0x2b75: 0x0040, + 0x2b76: 0x0040, 0x2b77: 0x0040, 0x2b78: 0x0040, 0x2b79: 0x0040, 0x2b7a: 0x0040, 0x2b7b: 0x0040, + 0x2b7c: 0x0040, 0x2b7d: 0x0040, 0x2b7e: 0x0040, 0x2b7f: 0x0040, + // Block 0xae, offset 0x2b80 + 0x2b80: 0x0040, 0x2b81: 0x0040, 0x2b82: 0x0040, 0x2b83: 0x0040, 0x2b84: 0x0040, 0x2b85: 0x0040, + 0x2b86: 0x0040, 0x2b8b: 0x0040, + 0x2b8c: 0x0040, 0x2b8d: 0x0040, 0x2b8e: 0x0040, 0x2b8f: 0x0040, 0x2b90: 0x0040, 0x2b91: 0x0040, + 0x2b92: 0x0040, 0x2b93: 0x0040, 0x2b94: 0x0040, 0x2b95: 0x0040, 0x2b96: 0x0040, 0x2b97: 0x0040, + 0x2b98: 0x0040, 0x2b99: 0x0040, 0x2b9a: 0x0040, 0x2b9b: 0x0040, 0x2b9c: 0x0040, 0x2b9d: 0x0040, + 0x2b9e: 0x0040, 0x2b9f: 0x0040, 0x2ba0: 0x0040, 0x2ba1: 0x0040, 0x2ba2: 0x0040, 0x2ba3: 0x0040, + 0x2ba4: 0x0040, 0x2ba5: 0x0040, 0x2ba6: 0x0040, 0x2ba7: 0x0040, 0x2ba8: 0x0040, 0x2ba9: 0x0040, + 0x2baa: 0x0040, 0x2bab: 0x0040, 0x2bac: 0x0040, 0x2bad: 0x0040, 0x2bae: 0x0040, 0x2baf: 0x0040, + 0x2bb0: 0x0040, 0x2bb1: 0x0040, 0x2bb2: 0x0040, 0x2bb3: 0x0040, 0x2bb4: 0x0040, 0x2bb5: 0x0040, + 0x2bb6: 0x0040, 0x2bb7: 0x0040, 0x2bb8: 0x0040, 0x2bb9: 0x0040, 0x2bba: 0x0040, 0x2bbb: 0x0040, + // Block 0xaf, offset 0x2bc0 + 0x2bc0: 0x008c, 0x2bc1: 0x008c, 0x2bc2: 0x008c, 0x2bc3: 0x008c, 0x2bc4: 0x008c, 0x2bc5: 0x008c, + 0x2bc6: 0x008c, 0x2bc7: 0x008c, 0x2bc8: 0x008c, 0x2bc9: 0x008c, 0x2bca: 0x008c, 0x2bcb: 0x008c, + 0x2bcc: 0x008c, 0x2bcd: 0x008c, 0x2bce: 0x00cc, 0x2bcf: 0x00cc, 0x2bd0: 0x008c, 0x2bd1: 0x00cc, + 0x2bd2: 0x008c, 0x2bd3: 0x00cc, 0x2bd4: 0x00cc, 0x2bd5: 0x008c, 0x2bd6: 0x008c, 0x2bd7: 0x008c, + 0x2bd8: 0x008c, 0x2bd9: 0x008c, 0x2bda: 0x008c, 0x2bdb: 0x008c, 0x2bdc: 0x008c, 0x2bdd: 0x008c, + 0x2bde: 0x008c, 0x2bdf: 0x00cc, 0x2be0: 0x008c, 0x2be1: 0x00cc, 0x2be2: 0x008c, 0x2be3: 0x00cc, + 0x2be4: 0x00cc, 0x2be5: 0x008c, 0x2be6: 0x008c, 0x2be7: 0x00cc, 0x2be8: 0x00cc, 0x2be9: 0x00cc, + 0x2bea: 0x008c, 0x2beb: 0x008c, 0x2bec: 0x008c, 0x2bed: 0x008c, 0x2bee: 0x008c, 0x2bef: 0x008c, + 0x2bf0: 0x008c, 0x2bf1: 0x008c, 0x2bf2: 0x008c, 0x2bf3: 0x008c, 0x2bf4: 
0x008c, 0x2bf5: 0x008c, + 0x2bf6: 0x008c, 0x2bf7: 0x008c, 0x2bf8: 0x008c, 0x2bf9: 0x008c, 0x2bfa: 0x008c, 0x2bfb: 0x008c, + 0x2bfc: 0x008c, 0x2bfd: 0x008c, 0x2bfe: 0x008c, 0x2bff: 0x008c, + // Block 0xb0, offset 0x2c00 + 0x2c00: 0x008c, 0x2c01: 0x008c, 0x2c02: 0x008c, 0x2c03: 0x008c, 0x2c04: 0x008c, 0x2c05: 0x008c, + 0x2c06: 0x008c, 0x2c07: 0x008c, 0x2c08: 0x008c, 0x2c09: 0x008c, 0x2c0a: 0x008c, 0x2c0b: 0x008c, + 0x2c0c: 0x008c, 0x2c0d: 0x008c, 0x2c0e: 0x008c, 0x2c0f: 0x008c, 0x2c10: 0x008c, 0x2c11: 0x008c, + 0x2c12: 0x008c, 0x2c13: 0x008c, 0x2c14: 0x008c, 0x2c15: 0x008c, 0x2c16: 0x008c, 0x2c17: 0x008c, + 0x2c18: 0x008c, 0x2c19: 0x008c, 0x2c1a: 0x008c, 0x2c1b: 0x008c, 0x2c1c: 0x008c, 0x2c1d: 0x008c, + 0x2c1e: 0x008c, 0x2c1f: 0x008c, 0x2c20: 0x008c, 0x2c21: 0x008c, 0x2c22: 0x008c, 0x2c23: 0x008c, + 0x2c24: 0x008c, 0x2c25: 0x008c, 0x2c26: 0x008c, 0x2c27: 0x008c, 0x2c28: 0x008c, 0x2c29: 0x008c, + 0x2c2a: 0x008c, 0x2c2b: 0x008c, 0x2c2c: 0x008c, 0x2c2d: 0x008c, + 0x2c30: 0x008c, 0x2c31: 0x008c, 0x2c32: 0x008c, 0x2c33: 0x008c, 0x2c34: 0x008c, 0x2c35: 0x008c, + 0x2c36: 0x008c, 0x2c37: 0x008c, 0x2c38: 0x008c, 0x2c39: 0x008c, 0x2c3a: 0x008c, 0x2c3b: 0x008c, + 0x2c3c: 0x008c, 0x2c3d: 0x008c, 0x2c3e: 0x008c, 0x2c3f: 0x008c, + // Block 0xb1, offset 0x2c40 + 0x2c40: 0x008c, 0x2c41: 0x008c, 0x2c42: 0x008c, 0x2c43: 0x008c, 0x2c44: 0x008c, 0x2c45: 0x008c, + 0x2c46: 0x008c, 0x2c47: 0x008c, 0x2c48: 0x008c, 0x2c49: 0x008c, 0x2c4a: 0x008c, 0x2c4b: 0x008c, + 0x2c4c: 0x008c, 0x2c4d: 0x008c, 0x2c4e: 0x008c, 0x2c4f: 0x008c, 0x2c50: 0x008c, 0x2c51: 0x008c, + 0x2c52: 0x008c, 0x2c53: 0x008c, 0x2c54: 0x008c, 0x2c55: 0x008c, 0x2c56: 0x008c, 0x2c57: 0x008c, + 0x2c58: 0x008c, 0x2c59: 0x008c, + // Block 0xb2, offset 0x2c80 + 0x2c80: 0x0080, 0x2c81: 0x0080, 0x2c82: 0x0080, 0x2c83: 0x0080, 0x2c84: 0x0080, 0x2c85: 0x0080, + 0x2c86: 0x0080, + 0x2c93: 0x0080, 0x2c94: 0x0080, 0x2c95: 0x0080, 0x2c96: 0x0080, 0x2c97: 0x0080, + 0x2c9d: 0x008a, + 0x2c9e: 0x00cb, 0x2c9f: 0x008a, 0x2ca0: 0x008a, 0x2ca1: 0x008a, 0x2ca2: 0x008a, 0x2ca3: 0x008a, + 0x2ca4: 0x008a, 0x2ca5: 0x008a, 0x2ca6: 0x008a, 0x2ca7: 0x008a, 0x2ca8: 0x008a, 0x2ca9: 0x008a, + 0x2caa: 0x008a, 0x2cab: 0x008a, 0x2cac: 0x008a, 0x2cad: 0x008a, 0x2cae: 0x008a, 0x2caf: 0x008a, + 0x2cb0: 0x008a, 0x2cb1: 0x008a, 0x2cb2: 0x008a, 0x2cb3: 0x008a, 0x2cb4: 0x008a, 0x2cb5: 0x008a, + 0x2cb6: 0x008a, 0x2cb8: 0x008a, 0x2cb9: 0x008a, 0x2cba: 0x008a, 0x2cbb: 0x008a, + 0x2cbc: 0x008a, 0x2cbe: 0x008a, + // Block 0xb3, offset 0x2cc0 + 0x2cc0: 0x008a, 0x2cc1: 0x008a, 0x2cc3: 0x008a, 0x2cc4: 0x008a, + 0x2cc6: 0x008a, 0x2cc7: 0x008a, 0x2cc8: 0x008a, 0x2cc9: 0x008a, 0x2cca: 0x008a, 0x2ccb: 0x008a, + 0x2ccc: 0x008a, 0x2ccd: 0x008a, 0x2cce: 0x008a, 0x2ccf: 0x008a, 0x2cd0: 0x0080, 0x2cd1: 0x0080, + 0x2cd2: 0x0080, 0x2cd3: 0x0080, 0x2cd4: 0x0080, 0x2cd5: 0x0080, 0x2cd6: 0x0080, 0x2cd7: 0x0080, + 0x2cd8: 0x0080, 0x2cd9: 0x0080, 0x2cda: 0x0080, 0x2cdb: 0x0080, 0x2cdc: 0x0080, 0x2cdd: 0x0080, + 0x2cde: 0x0080, 0x2cdf: 0x0080, 0x2ce0: 0x0080, 0x2ce1: 0x0080, 0x2ce2: 0x0080, 0x2ce3: 0x0080, + 0x2ce4: 0x0080, 0x2ce5: 0x0080, 0x2ce6: 0x0080, 0x2ce7: 0x0080, 0x2ce8: 0x0080, 0x2ce9: 0x0080, + 0x2cea: 0x0080, 0x2ceb: 0x0080, 0x2cec: 0x0080, 0x2ced: 0x0080, 0x2cee: 0x0080, 0x2cef: 0x0080, + 0x2cf0: 0x0080, 0x2cf1: 0x0080, 0x2cf2: 0x0080, 0x2cf3: 0x0080, 0x2cf4: 0x0080, 0x2cf5: 0x0080, + 0x2cf6: 0x0080, 0x2cf7: 0x0080, 0x2cf8: 0x0080, 0x2cf9: 0x0080, 0x2cfa: 0x0080, 0x2cfb: 0x0080, + 0x2cfc: 0x0080, 0x2cfd: 0x0080, 0x2cfe: 0x0080, 0x2cff: 0x0080, + // Block 0xb4, offset 0x2d00 + 0x2d00: 0x0080, 0x2d01: 0x0080, 
+ 0x2d13: 0x0080, 0x2d14: 0x0080, 0x2d15: 0x0080, 0x2d16: 0x0080, 0x2d17: 0x0080, + 0x2d18: 0x0080, 0x2d19: 0x0080, 0x2d1a: 0x0080, 0x2d1b: 0x0080, 0x2d1c: 0x0080, 0x2d1d: 0x0080, + 0x2d1e: 0x0080, 0x2d1f: 0x0080, 0x2d20: 0x0080, 0x2d21: 0x0080, 0x2d22: 0x0080, 0x2d23: 0x0080, + 0x2d24: 0x0080, 0x2d25: 0x0080, 0x2d26: 0x0080, 0x2d27: 0x0080, 0x2d28: 0x0080, 0x2d29: 0x0080, + 0x2d2a: 0x0080, 0x2d2b: 0x0080, 0x2d2c: 0x0080, 0x2d2d: 0x0080, 0x2d2e: 0x0080, 0x2d2f: 0x0080, + 0x2d30: 0x0080, 0x2d31: 0x0080, 0x2d32: 0x0080, 0x2d33: 0x0080, 0x2d34: 0x0080, 0x2d35: 0x0080, + 0x2d36: 0x0080, 0x2d37: 0x0080, 0x2d38: 0x0080, 0x2d39: 0x0080, 0x2d3a: 0x0080, 0x2d3b: 0x0080, + 0x2d3c: 0x0080, 0x2d3d: 0x0080, 0x2d3e: 0x0080, 0x2d3f: 0x0080, + // Block 0xb5, offset 0x2d40 + 0x2d50: 0x0080, 0x2d51: 0x0080, + 0x2d52: 0x0080, 0x2d53: 0x0080, 0x2d54: 0x0080, 0x2d55: 0x0080, 0x2d56: 0x0080, 0x2d57: 0x0080, + 0x2d58: 0x0080, 0x2d59: 0x0080, 0x2d5a: 0x0080, 0x2d5b: 0x0080, 0x2d5c: 0x0080, 0x2d5d: 0x0080, + 0x2d5e: 0x0080, 0x2d5f: 0x0080, 0x2d60: 0x0080, 0x2d61: 0x0080, 0x2d62: 0x0080, 0x2d63: 0x0080, + 0x2d64: 0x0080, 0x2d65: 0x0080, 0x2d66: 0x0080, 0x2d67: 0x0080, 0x2d68: 0x0080, 0x2d69: 0x0080, + 0x2d6a: 0x0080, 0x2d6b: 0x0080, 0x2d6c: 0x0080, 0x2d6d: 0x0080, 0x2d6e: 0x0080, 0x2d6f: 0x0080, + 0x2d70: 0x0080, 0x2d71: 0x0080, 0x2d72: 0x0080, 0x2d73: 0x0080, 0x2d74: 0x0080, 0x2d75: 0x0080, + 0x2d76: 0x0080, 0x2d77: 0x0080, 0x2d78: 0x0080, 0x2d79: 0x0080, 0x2d7a: 0x0080, 0x2d7b: 0x0080, + 0x2d7c: 0x0080, 0x2d7d: 0x0080, 0x2d7e: 0x0080, 0x2d7f: 0x0080, + // Block 0xb6, offset 0x2d80 + 0x2d80: 0x0080, 0x2d81: 0x0080, 0x2d82: 0x0080, 0x2d83: 0x0080, 0x2d84: 0x0080, 0x2d85: 0x0080, + 0x2d86: 0x0080, 0x2d87: 0x0080, 0x2d88: 0x0080, 0x2d89: 0x0080, 0x2d8a: 0x0080, 0x2d8b: 0x0080, + 0x2d8c: 0x0080, 0x2d8d: 0x0080, 0x2d8e: 0x0080, 0x2d8f: 0x0080, + 0x2d92: 0x0080, 0x2d93: 0x0080, 0x2d94: 0x0080, 0x2d95: 0x0080, 0x2d96: 0x0080, 0x2d97: 0x0080, + 0x2d98: 0x0080, 0x2d99: 0x0080, 0x2d9a: 0x0080, 0x2d9b: 0x0080, 0x2d9c: 0x0080, 0x2d9d: 0x0080, + 0x2d9e: 0x0080, 0x2d9f: 0x0080, 0x2da0: 0x0080, 0x2da1: 0x0080, 0x2da2: 0x0080, 0x2da3: 0x0080, + 0x2da4: 0x0080, 0x2da5: 0x0080, 0x2da6: 0x0080, 0x2da7: 0x0080, 0x2da8: 0x0080, 0x2da9: 0x0080, + 0x2daa: 0x0080, 0x2dab: 0x0080, 0x2dac: 0x0080, 0x2dad: 0x0080, 0x2dae: 0x0080, 0x2daf: 0x0080, + 0x2db0: 0x0080, 0x2db1: 0x0080, 0x2db2: 0x0080, 0x2db3: 0x0080, 0x2db4: 0x0080, 0x2db5: 0x0080, + 0x2db6: 0x0080, 0x2db7: 0x0080, 0x2db8: 0x0080, 0x2db9: 0x0080, 0x2dba: 0x0080, 0x2dbb: 0x0080, + 0x2dbc: 0x0080, 0x2dbd: 0x0080, 0x2dbe: 0x0080, 0x2dbf: 0x0080, + // Block 0xb7, offset 0x2dc0 + 0x2dc0: 0x0080, 0x2dc1: 0x0080, 0x2dc2: 0x0080, 0x2dc3: 0x0080, 0x2dc4: 0x0080, 0x2dc5: 0x0080, + 0x2dc6: 0x0080, 0x2dc7: 0x0080, + 0x2df0: 0x0080, 0x2df1: 0x0080, 0x2df2: 0x0080, 0x2df3: 0x0080, 0x2df4: 0x0080, 0x2df5: 0x0080, + 0x2df6: 0x0080, 0x2df7: 0x0080, 0x2df8: 0x0080, 0x2df9: 0x0080, 0x2dfa: 0x0080, 0x2dfb: 0x0080, + 0x2dfc: 0x0080, 0x2dfd: 0x0080, + // Block 0xb8, offset 0x2e00 + 0x2e00: 0x0040, 0x2e01: 0x0040, 0x2e02: 0x0040, 0x2e03: 0x0040, 0x2e04: 0x0040, 0x2e05: 0x0040, + 0x2e06: 0x0040, 0x2e07: 0x0040, 0x2e08: 0x0040, 0x2e09: 0x0040, 0x2e0a: 0x0040, 0x2e0b: 0x0040, + 0x2e0c: 0x0040, 0x2e0d: 0x0040, 0x2e0e: 0x0040, 0x2e0f: 0x0040, 0x2e10: 0x0080, 0x2e11: 0x0080, + 0x2e12: 0x0080, 0x2e13: 0x0080, 0x2e14: 0x0080, 0x2e15: 0x0080, 0x2e16: 0x0080, 0x2e17: 0x0080, + 0x2e18: 0x0080, 0x2e19: 0x0080, + 0x2e20: 0x00c3, 0x2e21: 0x00c3, 0x2e22: 0x00c3, 0x2e23: 0x00c3, + 0x2e24: 0x00c3, 0x2e25: 0x00c3, 
0x2e26: 0x00c3, 0x2e27: 0x00c3, 0x2e28: 0x00c3, 0x2e29: 0x00c3, + 0x2e2a: 0x00c3, 0x2e2b: 0x00c3, 0x2e2c: 0x00c3, 0x2e2d: 0x00c3, 0x2e2e: 0x00c3, 0x2e2f: 0x00c3, + 0x2e30: 0x0080, 0x2e31: 0x0080, 0x2e32: 0x0080, 0x2e33: 0x0080, 0x2e34: 0x0080, 0x2e35: 0x0080, + 0x2e36: 0x0080, 0x2e37: 0x0080, 0x2e38: 0x0080, 0x2e39: 0x0080, 0x2e3a: 0x0080, 0x2e3b: 0x0080, + 0x2e3c: 0x0080, 0x2e3d: 0x0080, 0x2e3e: 0x0080, 0x2e3f: 0x0080, + // Block 0xb9, offset 0x2e40 + 0x2e40: 0x0080, 0x2e41: 0x0080, 0x2e42: 0x0080, 0x2e43: 0x0080, 0x2e44: 0x0080, 0x2e45: 0x0080, + 0x2e46: 0x0080, 0x2e47: 0x0080, 0x2e48: 0x0080, 0x2e49: 0x0080, 0x2e4a: 0x0080, 0x2e4b: 0x0080, + 0x2e4c: 0x0080, 0x2e4d: 0x0080, 0x2e4e: 0x0080, 0x2e4f: 0x0080, 0x2e50: 0x0080, 0x2e51: 0x0080, + 0x2e52: 0x0080, 0x2e54: 0x0080, 0x2e55: 0x0080, 0x2e56: 0x0080, 0x2e57: 0x0080, + 0x2e58: 0x0080, 0x2e59: 0x0080, 0x2e5a: 0x0080, 0x2e5b: 0x0080, 0x2e5c: 0x0080, 0x2e5d: 0x0080, + 0x2e5e: 0x0080, 0x2e5f: 0x0080, 0x2e60: 0x0080, 0x2e61: 0x0080, 0x2e62: 0x0080, 0x2e63: 0x0080, + 0x2e64: 0x0080, 0x2e65: 0x0080, 0x2e66: 0x0080, 0x2e68: 0x0080, 0x2e69: 0x0080, + 0x2e6a: 0x0080, 0x2e6b: 0x0080, + 0x2e70: 0x0080, 0x2e71: 0x0080, 0x2e72: 0x0080, 0x2e73: 0x00c0, 0x2e74: 0x0080, + 0x2e76: 0x0080, 0x2e77: 0x0080, 0x2e78: 0x0080, 0x2e79: 0x0080, 0x2e7a: 0x0080, 0x2e7b: 0x0080, + 0x2e7c: 0x0080, 0x2e7d: 0x0080, 0x2e7e: 0x0080, 0x2e7f: 0x0080, + // Block 0xba, offset 0x2e80 + 0x2e80: 0x0080, 0x2e81: 0x0080, 0x2e82: 0x0080, 0x2e83: 0x0080, 0x2e84: 0x0080, 0x2e85: 0x0080, + 0x2e86: 0x0080, 0x2e87: 0x0080, 0x2e88: 0x0080, 0x2e89: 0x0080, 0x2e8a: 0x0080, 0x2e8b: 0x0080, + 0x2e8c: 0x0080, 0x2e8d: 0x0080, 0x2e8e: 0x0080, 0x2e8f: 0x0080, 0x2e90: 0x0080, 0x2e91: 0x0080, + 0x2e92: 0x0080, 0x2e93: 0x0080, 0x2e94: 0x0080, 0x2e95: 0x0080, 0x2e96: 0x0080, 0x2e97: 0x0080, + 0x2e98: 0x0080, 0x2e99: 0x0080, 0x2e9a: 0x0080, 0x2e9b: 0x0080, 0x2e9c: 0x0080, 0x2e9d: 0x0080, + 0x2e9e: 0x0080, 0x2e9f: 0x0080, 0x2ea0: 0x0080, 0x2ea1: 0x0080, 0x2ea2: 0x0080, 0x2ea3: 0x0080, + 0x2ea4: 0x0080, 0x2ea5: 0x0080, 0x2ea6: 0x0080, 0x2ea7: 0x0080, 0x2ea8: 0x0080, 0x2ea9: 0x0080, + 0x2eaa: 0x0080, 0x2eab: 0x0080, 0x2eac: 0x0080, 0x2ead: 0x0080, 0x2eae: 0x0080, 0x2eaf: 0x0080, + 0x2eb0: 0x0080, 0x2eb1: 0x0080, 0x2eb2: 0x0080, 0x2eb3: 0x0080, 0x2eb4: 0x0080, 0x2eb5: 0x0080, + 0x2eb6: 0x0080, 0x2eb7: 0x0080, 0x2eb8: 0x0080, 0x2eb9: 0x0080, 0x2eba: 0x0080, 0x2ebb: 0x0080, + 0x2ebc: 0x0080, 0x2ebf: 0x0040, + // Block 0xbb, offset 0x2ec0 + 0x2ec1: 0x0080, 0x2ec2: 0x0080, 0x2ec3: 0x0080, 0x2ec4: 0x0080, 0x2ec5: 0x0080, + 0x2ec6: 0x0080, 0x2ec7: 0x0080, 0x2ec8: 0x0080, 0x2ec9: 0x0080, 0x2eca: 0x0080, 0x2ecb: 0x0080, + 0x2ecc: 0x0080, 0x2ecd: 0x0080, 0x2ece: 0x0080, 0x2ecf: 0x0080, 0x2ed0: 0x0080, 0x2ed1: 0x0080, + 0x2ed2: 0x0080, 0x2ed3: 0x0080, 0x2ed4: 0x0080, 0x2ed5: 0x0080, 0x2ed6: 0x0080, 0x2ed7: 0x0080, + 0x2ed8: 0x0080, 0x2ed9: 0x0080, 0x2eda: 0x0080, 0x2edb: 0x0080, 0x2edc: 0x0080, 0x2edd: 0x0080, + 0x2ede: 0x0080, 0x2edf: 0x0080, 0x2ee0: 0x0080, 0x2ee1: 0x0080, 0x2ee2: 0x0080, 0x2ee3: 0x0080, + 0x2ee4: 0x0080, 0x2ee5: 0x0080, 0x2ee6: 0x0080, 0x2ee7: 0x0080, 0x2ee8: 0x0080, 0x2ee9: 0x0080, + 0x2eea: 0x0080, 0x2eeb: 0x0080, 0x2eec: 0x0080, 0x2eed: 0x0080, 0x2eee: 0x0080, 0x2eef: 0x0080, + 0x2ef0: 0x0080, 0x2ef1: 0x0080, 0x2ef2: 0x0080, 0x2ef3: 0x0080, 0x2ef4: 0x0080, 0x2ef5: 0x0080, + 0x2ef6: 0x0080, 0x2ef7: 0x0080, 0x2ef8: 0x0080, 0x2ef9: 0x0080, 0x2efa: 0x0080, 0x2efb: 0x0080, + 0x2efc: 0x0080, 0x2efd: 0x0080, 0x2efe: 0x0080, 0x2eff: 0x0080, + // Block 0xbc, offset 0x2f00 + 0x2f00: 0x0080, 0x2f01: 
0x0080, 0x2f02: 0x0080, 0x2f03: 0x0080, 0x2f04: 0x0080, 0x2f05: 0x0080, + 0x2f06: 0x0080, 0x2f07: 0x0080, 0x2f08: 0x0080, 0x2f09: 0x0080, 0x2f0a: 0x0080, 0x2f0b: 0x0080, + 0x2f0c: 0x0080, 0x2f0d: 0x0080, 0x2f0e: 0x0080, 0x2f0f: 0x0080, 0x2f10: 0x0080, 0x2f11: 0x0080, + 0x2f12: 0x0080, 0x2f13: 0x0080, 0x2f14: 0x0080, 0x2f15: 0x0080, 0x2f16: 0x0080, 0x2f17: 0x0080, + 0x2f18: 0x0080, 0x2f19: 0x0080, 0x2f1a: 0x0080, 0x2f1b: 0x0080, 0x2f1c: 0x0080, 0x2f1d: 0x0080, + 0x2f1e: 0x0080, 0x2f1f: 0x0080, 0x2f20: 0x0080, 0x2f21: 0x0080, 0x2f22: 0x0080, 0x2f23: 0x0080, + 0x2f24: 0x0080, 0x2f25: 0x0080, 0x2f26: 0x008c, 0x2f27: 0x008c, 0x2f28: 0x008c, 0x2f29: 0x008c, + 0x2f2a: 0x008c, 0x2f2b: 0x008c, 0x2f2c: 0x008c, 0x2f2d: 0x008c, 0x2f2e: 0x008c, 0x2f2f: 0x008c, + 0x2f30: 0x0080, 0x2f31: 0x008c, 0x2f32: 0x008c, 0x2f33: 0x008c, 0x2f34: 0x008c, 0x2f35: 0x008c, + 0x2f36: 0x008c, 0x2f37: 0x008c, 0x2f38: 0x008c, 0x2f39: 0x008c, 0x2f3a: 0x008c, 0x2f3b: 0x008c, + 0x2f3c: 0x008c, 0x2f3d: 0x008c, 0x2f3e: 0x008c, 0x2f3f: 0x008c, + // Block 0xbd, offset 0x2f40 + 0x2f40: 0x008c, 0x2f41: 0x008c, 0x2f42: 0x008c, 0x2f43: 0x008c, 0x2f44: 0x008c, 0x2f45: 0x008c, + 0x2f46: 0x008c, 0x2f47: 0x008c, 0x2f48: 0x008c, 0x2f49: 0x008c, 0x2f4a: 0x008c, 0x2f4b: 0x008c, + 0x2f4c: 0x008c, 0x2f4d: 0x008c, 0x2f4e: 0x008c, 0x2f4f: 0x008c, 0x2f50: 0x008c, 0x2f51: 0x008c, + 0x2f52: 0x008c, 0x2f53: 0x008c, 0x2f54: 0x008c, 0x2f55: 0x008c, 0x2f56: 0x008c, 0x2f57: 0x008c, + 0x2f58: 0x008c, 0x2f59: 0x008c, 0x2f5a: 0x008c, 0x2f5b: 0x008c, 0x2f5c: 0x008c, 0x2f5d: 0x008c, + 0x2f5e: 0x0080, 0x2f5f: 0x0080, 0x2f60: 0x0040, 0x2f61: 0x0080, 0x2f62: 0x0080, 0x2f63: 0x0080, + 0x2f64: 0x0080, 0x2f65: 0x0080, 0x2f66: 0x0080, 0x2f67: 0x0080, 0x2f68: 0x0080, 0x2f69: 0x0080, + 0x2f6a: 0x0080, 0x2f6b: 0x0080, 0x2f6c: 0x0080, 0x2f6d: 0x0080, 0x2f6e: 0x0080, 0x2f6f: 0x0080, + 0x2f70: 0x0080, 0x2f71: 0x0080, 0x2f72: 0x0080, 0x2f73: 0x0080, 0x2f74: 0x0080, 0x2f75: 0x0080, + 0x2f76: 0x0080, 0x2f77: 0x0080, 0x2f78: 0x0080, 0x2f79: 0x0080, 0x2f7a: 0x0080, 0x2f7b: 0x0080, + 0x2f7c: 0x0080, 0x2f7d: 0x0080, 0x2f7e: 0x0080, + // Block 0xbe, offset 0x2f80 + 0x2f82: 0x0080, 0x2f83: 0x0080, 0x2f84: 0x0080, 0x2f85: 0x0080, + 0x2f86: 0x0080, 0x2f87: 0x0080, 0x2f8a: 0x0080, 0x2f8b: 0x0080, + 0x2f8c: 0x0080, 0x2f8d: 0x0080, 0x2f8e: 0x0080, 0x2f8f: 0x0080, + 0x2f92: 0x0080, 0x2f93: 0x0080, 0x2f94: 0x0080, 0x2f95: 0x0080, 0x2f96: 0x0080, 0x2f97: 0x0080, + 0x2f9a: 0x0080, 0x2f9b: 0x0080, 0x2f9c: 0x0080, + 0x2fa0: 0x0080, 0x2fa1: 0x0080, 0x2fa2: 0x0080, 0x2fa3: 0x0080, + 0x2fa4: 0x0080, 0x2fa5: 0x0080, 0x2fa6: 0x0080, 0x2fa8: 0x0080, 0x2fa9: 0x0080, + 0x2faa: 0x0080, 0x2fab: 0x0080, 0x2fac: 0x0080, 0x2fad: 0x0080, 0x2fae: 0x0080, + 0x2fb9: 0x0040, 0x2fba: 0x0040, 0x2fbb: 0x0040, + 0x2fbc: 0x0080, 0x2fbd: 0x0080, + // Block 0xbf, offset 0x2fc0 + 0x2fc0: 0x00c0, 0x2fc1: 0x00c0, 0x2fc2: 0x00c0, 0x2fc3: 0x00c0, 0x2fc4: 0x00c0, 0x2fc5: 0x00c0, + 0x2fc6: 0x00c0, 0x2fc7: 0x00c0, 0x2fc8: 0x00c0, 0x2fc9: 0x00c0, 0x2fca: 0x00c0, 0x2fcb: 0x00c0, + 0x2fcd: 0x00c0, 0x2fce: 0x00c0, 0x2fcf: 0x00c0, 0x2fd0: 0x00c0, 0x2fd1: 0x00c0, + 0x2fd2: 0x00c0, 0x2fd3: 0x00c0, 0x2fd4: 0x00c0, 0x2fd5: 0x00c0, 0x2fd6: 0x00c0, 0x2fd7: 0x00c0, + 0x2fd8: 0x00c0, 0x2fd9: 0x00c0, 0x2fda: 0x00c0, 0x2fdb: 0x00c0, 0x2fdc: 0x00c0, 0x2fdd: 0x00c0, + 0x2fde: 0x00c0, 0x2fdf: 0x00c0, 0x2fe0: 0x00c0, 0x2fe1: 0x00c0, 0x2fe2: 0x00c0, 0x2fe3: 0x00c0, + 0x2fe4: 0x00c0, 0x2fe5: 0x00c0, 0x2fe6: 0x00c0, 0x2fe8: 0x00c0, 0x2fe9: 0x00c0, + 0x2fea: 0x00c0, 0x2feb: 0x00c0, 0x2fec: 0x00c0, 0x2fed: 0x00c0, 0x2fee: 0x00c0, 0x2fef: 0x00c0, 
+ 0x2ff0: 0x00c0, 0x2ff1: 0x00c0, 0x2ff2: 0x00c0, 0x2ff3: 0x00c0, 0x2ff4: 0x00c0, 0x2ff5: 0x00c0, + 0x2ff6: 0x00c0, 0x2ff7: 0x00c0, 0x2ff8: 0x00c0, 0x2ff9: 0x00c0, 0x2ffa: 0x00c0, + 0x2ffc: 0x00c0, 0x2ffd: 0x00c0, 0x2fff: 0x00c0, + // Block 0xc0, offset 0x3000 + 0x3000: 0x00c0, 0x3001: 0x00c0, 0x3002: 0x00c0, 0x3003: 0x00c0, 0x3004: 0x00c0, 0x3005: 0x00c0, + 0x3006: 0x00c0, 0x3007: 0x00c0, 0x3008: 0x00c0, 0x3009: 0x00c0, 0x300a: 0x00c0, 0x300b: 0x00c0, + 0x300c: 0x00c0, 0x300d: 0x00c0, 0x3010: 0x00c0, 0x3011: 0x00c0, + 0x3012: 0x00c0, 0x3013: 0x00c0, 0x3014: 0x00c0, 0x3015: 0x00c0, 0x3016: 0x00c0, 0x3017: 0x00c0, + 0x3018: 0x00c0, 0x3019: 0x00c0, 0x301a: 0x00c0, 0x301b: 0x00c0, 0x301c: 0x00c0, 0x301d: 0x00c0, + // Block 0xc1, offset 0x3040 + 0x3040: 0x00c0, 0x3041: 0x00c0, 0x3042: 0x00c0, 0x3043: 0x00c0, 0x3044: 0x00c0, 0x3045: 0x00c0, + 0x3046: 0x00c0, 0x3047: 0x00c0, 0x3048: 0x00c0, 0x3049: 0x00c0, 0x304a: 0x00c0, 0x304b: 0x00c0, + 0x304c: 0x00c0, 0x304d: 0x00c0, 0x304e: 0x00c0, 0x304f: 0x00c0, 0x3050: 0x00c0, 0x3051: 0x00c0, + 0x3052: 0x00c0, 0x3053: 0x00c0, 0x3054: 0x00c0, 0x3055: 0x00c0, 0x3056: 0x00c0, 0x3057: 0x00c0, + 0x3058: 0x00c0, 0x3059: 0x00c0, 0x305a: 0x00c0, 0x305b: 0x00c0, 0x305c: 0x00c0, 0x305d: 0x00c0, + 0x305e: 0x00c0, 0x305f: 0x00c0, 0x3060: 0x00c0, 0x3061: 0x00c0, 0x3062: 0x00c0, 0x3063: 0x00c0, + 0x3064: 0x00c0, 0x3065: 0x00c0, 0x3066: 0x00c0, 0x3067: 0x00c0, 0x3068: 0x00c0, 0x3069: 0x00c0, + 0x306a: 0x00c0, 0x306b: 0x00c0, 0x306c: 0x00c0, 0x306d: 0x00c0, 0x306e: 0x00c0, 0x306f: 0x00c0, + 0x3070: 0x00c0, 0x3071: 0x00c0, 0x3072: 0x00c0, 0x3073: 0x00c0, 0x3074: 0x00c0, 0x3075: 0x00c0, + 0x3076: 0x00c0, 0x3077: 0x00c0, 0x3078: 0x00c0, 0x3079: 0x00c0, 0x307a: 0x00c0, + // Block 0xc2, offset 0x3080 + 0x3080: 0x0080, 0x3081: 0x0080, 0x3082: 0x0080, + 0x3087: 0x0080, 0x3088: 0x0080, 0x3089: 0x0080, 0x308a: 0x0080, 0x308b: 0x0080, + 0x308c: 0x0080, 0x308d: 0x0080, 0x308e: 0x0080, 0x308f: 0x0080, 0x3090: 0x0080, 0x3091: 0x0080, + 0x3092: 0x0080, 0x3093: 0x0080, 0x3094: 0x0080, 0x3095: 0x0080, 0x3096: 0x0080, 0x3097: 0x0080, + 0x3098: 0x0080, 0x3099: 0x0080, 0x309a: 0x0080, 0x309b: 0x0080, 0x309c: 0x0080, 0x309d: 0x0080, + 0x309e: 0x0080, 0x309f: 0x0080, 0x30a0: 0x0080, 0x30a1: 0x0080, 0x30a2: 0x0080, 0x30a3: 0x0080, + 0x30a4: 0x0080, 0x30a5: 0x0080, 0x30a6: 0x0080, 0x30a7: 0x0080, 0x30a8: 0x0080, 0x30a9: 0x0080, + 0x30aa: 0x0080, 0x30ab: 0x0080, 0x30ac: 0x0080, 0x30ad: 0x0080, 0x30ae: 0x0080, 0x30af: 0x0080, + 0x30b0: 0x0080, 0x30b1: 0x0080, 0x30b2: 0x0080, 0x30b3: 0x0080, + 0x30b7: 0x0080, 0x30b8: 0x0080, 0x30b9: 0x0080, 0x30ba: 0x0080, 0x30bb: 0x0080, + 0x30bc: 0x0080, 0x30bd: 0x0080, 0x30be: 0x0080, 0x30bf: 0x0080, + // Block 0xc3, offset 0x30c0 + 0x30c0: 0x0088, 0x30c1: 0x0088, 0x30c2: 0x0088, 0x30c3: 0x0088, 0x30c4: 0x0088, 0x30c5: 0x0088, + 0x30c6: 0x0088, 0x30c7: 0x0088, 0x30c8: 0x0088, 0x30c9: 0x0088, 0x30ca: 0x0088, 0x30cb: 0x0088, + 0x30cc: 0x0088, 0x30cd: 0x0088, 0x30ce: 0x0088, 0x30cf: 0x0088, 0x30d0: 0x0088, 0x30d1: 0x0088, + 0x30d2: 0x0088, 0x30d3: 0x0088, 0x30d4: 0x0088, 0x30d5: 0x0088, 0x30d6: 0x0088, 0x30d7: 0x0088, + 0x30d8: 0x0088, 0x30d9: 0x0088, 0x30da: 0x0088, 0x30db: 0x0088, 0x30dc: 0x0088, 0x30dd: 0x0088, + 0x30de: 0x0088, 0x30df: 0x0088, 0x30e0: 0x0088, 0x30e1: 0x0088, 0x30e2: 0x0088, 0x30e3: 0x0088, + 0x30e4: 0x0088, 0x30e5: 0x0088, 0x30e6: 0x0088, 0x30e7: 0x0088, 0x30e8: 0x0088, 0x30e9: 0x0088, + 0x30ea: 0x0088, 0x30eb: 0x0088, 0x30ec: 0x0088, 0x30ed: 0x0088, 0x30ee: 0x0088, 0x30ef: 0x0088, + 0x30f0: 0x0088, 0x30f1: 0x0088, 0x30f2: 0x0088, 0x30f3: 
0x0088, 0x30f4: 0x0088, 0x30f5: 0x0088, + 0x30f6: 0x0088, 0x30f7: 0x0088, 0x30f8: 0x0088, 0x30f9: 0x0088, 0x30fa: 0x0088, 0x30fb: 0x0088, + 0x30fc: 0x0088, 0x30fd: 0x0088, 0x30fe: 0x0088, 0x30ff: 0x0088, + // Block 0xc4, offset 0x3100 + 0x3100: 0x0088, 0x3101: 0x0088, 0x3102: 0x0088, 0x3103: 0x0088, 0x3104: 0x0088, 0x3105: 0x0088, + 0x3106: 0x0088, 0x3107: 0x0088, 0x3108: 0x0088, 0x3109: 0x0088, 0x310a: 0x0088, 0x310b: 0x0088, + 0x310c: 0x0088, 0x310d: 0x0088, 0x310e: 0x0088, 0x3110: 0x0080, 0x3111: 0x0080, + 0x3112: 0x0080, 0x3113: 0x0080, 0x3114: 0x0080, 0x3115: 0x0080, 0x3116: 0x0080, 0x3117: 0x0080, + 0x3118: 0x0080, 0x3119: 0x0080, 0x311a: 0x0080, 0x311b: 0x0080, + 0x3120: 0x0088, + // Block 0xc5, offset 0x3140 + 0x3150: 0x0080, 0x3151: 0x0080, + 0x3152: 0x0080, 0x3153: 0x0080, 0x3154: 0x0080, 0x3155: 0x0080, 0x3156: 0x0080, 0x3157: 0x0080, + 0x3158: 0x0080, 0x3159: 0x0080, 0x315a: 0x0080, 0x315b: 0x0080, 0x315c: 0x0080, 0x315d: 0x0080, + 0x315e: 0x0080, 0x315f: 0x0080, 0x3160: 0x0080, 0x3161: 0x0080, 0x3162: 0x0080, 0x3163: 0x0080, + 0x3164: 0x0080, 0x3165: 0x0080, 0x3166: 0x0080, 0x3167: 0x0080, 0x3168: 0x0080, 0x3169: 0x0080, + 0x316a: 0x0080, 0x316b: 0x0080, 0x316c: 0x0080, 0x316d: 0x0080, 0x316e: 0x0080, 0x316f: 0x0080, + 0x3170: 0x0080, 0x3171: 0x0080, 0x3172: 0x0080, 0x3173: 0x0080, 0x3174: 0x0080, 0x3175: 0x0080, + 0x3176: 0x0080, 0x3177: 0x0080, 0x3178: 0x0080, 0x3179: 0x0080, 0x317a: 0x0080, 0x317b: 0x0080, + 0x317c: 0x0080, 0x317d: 0x00c3, + // Block 0xc6, offset 0x3180 + 0x3180: 0x00c0, 0x3181: 0x00c0, 0x3182: 0x00c0, 0x3183: 0x00c0, 0x3184: 0x00c0, 0x3185: 0x00c0, + 0x3186: 0x00c0, 0x3187: 0x00c0, 0x3188: 0x00c0, 0x3189: 0x00c0, 0x318a: 0x00c0, 0x318b: 0x00c0, + 0x318c: 0x00c0, 0x318d: 0x00c0, 0x318e: 0x00c0, 0x318f: 0x00c0, 0x3190: 0x00c0, 0x3191: 0x00c0, + 0x3192: 0x00c0, 0x3193: 0x00c0, 0x3194: 0x00c0, 0x3195: 0x00c0, 0x3196: 0x00c0, 0x3197: 0x00c0, + 0x3198: 0x00c0, 0x3199: 0x00c0, 0x319a: 0x00c0, 0x319b: 0x00c0, 0x319c: 0x00c0, + 0x31a0: 0x00c0, 0x31a1: 0x00c0, 0x31a2: 0x00c0, 0x31a3: 0x00c0, + 0x31a4: 0x00c0, 0x31a5: 0x00c0, 0x31a6: 0x00c0, 0x31a7: 0x00c0, 0x31a8: 0x00c0, 0x31a9: 0x00c0, + 0x31aa: 0x00c0, 0x31ab: 0x00c0, 0x31ac: 0x00c0, 0x31ad: 0x00c0, 0x31ae: 0x00c0, 0x31af: 0x00c0, + 0x31b0: 0x00c0, 0x31b1: 0x00c0, 0x31b2: 0x00c0, 0x31b3: 0x00c0, 0x31b4: 0x00c0, 0x31b5: 0x00c0, + 0x31b6: 0x00c0, 0x31b7: 0x00c0, 0x31b8: 0x00c0, 0x31b9: 0x00c0, 0x31ba: 0x00c0, 0x31bb: 0x00c0, + 0x31bc: 0x00c0, 0x31bd: 0x00c0, 0x31be: 0x00c0, 0x31bf: 0x00c0, + // Block 0xc7, offset 0x31c0 + 0x31c0: 0x00c0, 0x31c1: 0x00c0, 0x31c2: 0x00c0, 0x31c3: 0x00c0, 0x31c4: 0x00c0, 0x31c5: 0x00c0, + 0x31c6: 0x00c0, 0x31c7: 0x00c0, 0x31c8: 0x00c0, 0x31c9: 0x00c0, 0x31ca: 0x00c0, 0x31cb: 0x00c0, + 0x31cc: 0x00c0, 0x31cd: 0x00c0, 0x31ce: 0x00c0, 0x31cf: 0x00c0, 0x31d0: 0x00c0, + 0x31e0: 0x00c3, 0x31e1: 0x0080, 0x31e2: 0x0080, 0x31e3: 0x0080, + 0x31e4: 0x0080, 0x31e5: 0x0080, 0x31e6: 0x0080, 0x31e7: 0x0080, 0x31e8: 0x0080, 0x31e9: 0x0080, + 0x31ea: 0x0080, 0x31eb: 0x0080, 0x31ec: 0x0080, 0x31ed: 0x0080, 0x31ee: 0x0080, 0x31ef: 0x0080, + 0x31f0: 0x0080, 0x31f1: 0x0080, 0x31f2: 0x0080, 0x31f3: 0x0080, 0x31f4: 0x0080, 0x31f5: 0x0080, + 0x31f6: 0x0080, 0x31f7: 0x0080, 0x31f8: 0x0080, 0x31f9: 0x0080, 0x31fa: 0x0080, 0x31fb: 0x0080, + // Block 0xc8, offset 0x3200 + 0x3200: 0x00c0, 0x3201: 0x00c0, 0x3202: 0x00c0, 0x3203: 0x00c0, 0x3204: 0x00c0, 0x3205: 0x00c0, + 0x3206: 0x00c0, 0x3207: 0x00c0, 0x3208: 0x00c0, 0x3209: 0x00c0, 0x320a: 0x00c0, 0x320b: 0x00c0, + 0x320c: 0x00c0, 0x320d: 0x00c0, 0x320e: 0x00c0, 
0x320f: 0x00c0, 0x3210: 0x00c0, 0x3211: 0x00c0, + 0x3212: 0x00c0, 0x3213: 0x00c0, 0x3214: 0x00c0, 0x3215: 0x00c0, 0x3216: 0x00c0, 0x3217: 0x00c0, + 0x3218: 0x00c0, 0x3219: 0x00c0, 0x321a: 0x00c0, 0x321b: 0x00c0, 0x321c: 0x00c0, 0x321d: 0x00c0, + 0x321e: 0x00c0, 0x321f: 0x00c0, 0x3220: 0x0080, 0x3221: 0x0080, 0x3222: 0x0080, 0x3223: 0x0080, + 0x3230: 0x00c0, 0x3231: 0x00c0, 0x3232: 0x00c0, 0x3233: 0x00c0, 0x3234: 0x00c0, 0x3235: 0x00c0, + 0x3236: 0x00c0, 0x3237: 0x00c0, 0x3238: 0x00c0, 0x3239: 0x00c0, 0x323a: 0x00c0, 0x323b: 0x00c0, + 0x323c: 0x00c0, 0x323d: 0x00c0, 0x323e: 0x00c0, 0x323f: 0x00c0, + // Block 0xc9, offset 0x3240 + 0x3240: 0x00c0, 0x3241: 0x0080, 0x3242: 0x00c0, 0x3243: 0x00c0, 0x3244: 0x00c0, 0x3245: 0x00c0, + 0x3246: 0x00c0, 0x3247: 0x00c0, 0x3248: 0x00c0, 0x3249: 0x00c0, 0x324a: 0x0080, + 0x3250: 0x00c0, 0x3251: 0x00c0, + 0x3252: 0x00c0, 0x3253: 0x00c0, 0x3254: 0x00c0, 0x3255: 0x00c0, 0x3256: 0x00c0, 0x3257: 0x00c0, + 0x3258: 0x00c0, 0x3259: 0x00c0, 0x325a: 0x00c0, 0x325b: 0x00c0, 0x325c: 0x00c0, 0x325d: 0x00c0, + 0x325e: 0x00c0, 0x325f: 0x00c0, 0x3260: 0x00c0, 0x3261: 0x00c0, 0x3262: 0x00c0, 0x3263: 0x00c0, + 0x3264: 0x00c0, 0x3265: 0x00c0, 0x3266: 0x00c0, 0x3267: 0x00c0, 0x3268: 0x00c0, 0x3269: 0x00c0, + 0x326a: 0x00c0, 0x326b: 0x00c0, 0x326c: 0x00c0, 0x326d: 0x00c0, 0x326e: 0x00c0, 0x326f: 0x00c0, + 0x3270: 0x00c0, 0x3271: 0x00c0, 0x3272: 0x00c0, 0x3273: 0x00c0, 0x3274: 0x00c0, 0x3275: 0x00c0, + 0x3276: 0x00c3, 0x3277: 0x00c3, 0x3278: 0x00c3, 0x3279: 0x00c3, 0x327a: 0x00c3, + // Block 0xca, offset 0x3280 + 0x3280: 0x00c0, 0x3281: 0x00c0, 0x3282: 0x00c0, 0x3283: 0x00c0, 0x3284: 0x00c0, 0x3285: 0x00c0, + 0x3286: 0x00c0, 0x3287: 0x00c0, 0x3288: 0x00c0, 0x3289: 0x00c0, 0x328a: 0x00c0, 0x328b: 0x00c0, + 0x328c: 0x00c0, 0x328d: 0x00c0, 0x328e: 0x00c0, 0x328f: 0x00c0, 0x3290: 0x00c0, 0x3291: 0x00c0, + 0x3292: 0x00c0, 0x3293: 0x00c0, 0x3294: 0x00c0, 0x3295: 0x00c0, 0x3296: 0x00c0, 0x3297: 0x00c0, + 0x3298: 0x00c0, 0x3299: 0x00c0, 0x329a: 0x00c0, 0x329b: 0x00c0, 0x329c: 0x00c0, 0x329d: 0x00c0, + 0x329f: 0x0080, 0x32a0: 0x00c0, 0x32a1: 0x00c0, 0x32a2: 0x00c0, 0x32a3: 0x00c0, + 0x32a4: 0x00c0, 0x32a5: 0x00c0, 0x32a6: 0x00c0, 0x32a7: 0x00c0, 0x32a8: 0x00c0, 0x32a9: 0x00c0, + 0x32aa: 0x00c0, 0x32ab: 0x00c0, 0x32ac: 0x00c0, 0x32ad: 0x00c0, 0x32ae: 0x00c0, 0x32af: 0x00c0, + 0x32b0: 0x00c0, 0x32b1: 0x00c0, 0x32b2: 0x00c0, 0x32b3: 0x00c0, 0x32b4: 0x00c0, 0x32b5: 0x00c0, + 0x32b6: 0x00c0, 0x32b7: 0x00c0, 0x32b8: 0x00c0, 0x32b9: 0x00c0, 0x32ba: 0x00c0, 0x32bb: 0x00c0, + 0x32bc: 0x00c0, 0x32bd: 0x00c0, 0x32be: 0x00c0, 0x32bf: 0x00c0, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x00c0, 0x32c1: 0x00c0, 0x32c2: 0x00c0, 0x32c3: 0x00c0, + 0x32c8: 0x00c0, 0x32c9: 0x00c0, 0x32ca: 0x00c0, 0x32cb: 0x00c0, + 0x32cc: 0x00c0, 0x32cd: 0x00c0, 0x32ce: 0x00c0, 0x32cf: 0x00c0, 0x32d0: 0x0080, 0x32d1: 0x0080, + 0x32d2: 0x0080, 0x32d3: 0x0080, 0x32d4: 0x0080, 0x32d5: 0x0080, + // Block 0xcc, offset 0x3300 + 0x3300: 0x00c0, 0x3301: 0x00c0, 0x3302: 0x00c0, 0x3303: 0x00c0, 0x3304: 0x00c0, 0x3305: 0x00c0, + 0x3306: 0x00c0, 0x3307: 0x00c0, 0x3308: 0x00c0, 0x3309: 0x00c0, 0x330a: 0x00c0, 0x330b: 0x00c0, + 0x330c: 0x00c0, 0x330d: 0x00c0, 0x330e: 0x00c0, 0x330f: 0x00c0, 0x3310: 0x00c0, 0x3311: 0x00c0, + 0x3312: 0x00c0, 0x3313: 0x00c0, 0x3314: 0x00c0, 0x3315: 0x00c0, 0x3316: 0x00c0, 0x3317: 0x00c0, + 0x3318: 0x00c0, 0x3319: 0x00c0, 0x331a: 0x00c0, 0x331b: 0x00c0, 0x331c: 0x00c0, 0x331d: 0x00c0, + 0x3320: 0x00c0, 0x3321: 0x00c0, 0x3322: 0x00c0, 0x3323: 0x00c0, + 0x3324: 0x00c0, 0x3325: 0x00c0, 0x3326: 0x00c0, 0x3327: 
0x00c0, 0x3328: 0x00c0, 0x3329: 0x00c0, + 0x3330: 0x00c0, 0x3331: 0x00c0, 0x3332: 0x00c0, 0x3333: 0x00c0, 0x3334: 0x00c0, 0x3335: 0x00c0, + 0x3336: 0x00c0, 0x3337: 0x00c0, 0x3338: 0x00c0, 0x3339: 0x00c0, 0x333a: 0x00c0, 0x333b: 0x00c0, + 0x333c: 0x00c0, 0x333d: 0x00c0, 0x333e: 0x00c0, 0x333f: 0x00c0, + // Block 0xcd, offset 0x3340 + 0x3340: 0x00c0, 0x3341: 0x00c0, 0x3342: 0x00c0, 0x3343: 0x00c0, 0x3344: 0x00c0, 0x3345: 0x00c0, + 0x3346: 0x00c0, 0x3347: 0x00c0, 0x3348: 0x00c0, 0x3349: 0x00c0, 0x334a: 0x00c0, 0x334b: 0x00c0, + 0x334c: 0x00c0, 0x334d: 0x00c0, 0x334e: 0x00c0, 0x334f: 0x00c0, 0x3350: 0x00c0, 0x3351: 0x00c0, + 0x3352: 0x00c0, 0x3353: 0x00c0, + 0x3358: 0x00c0, 0x3359: 0x00c0, 0x335a: 0x00c0, 0x335b: 0x00c0, 0x335c: 0x00c0, 0x335d: 0x00c0, + 0x335e: 0x00c0, 0x335f: 0x00c0, 0x3360: 0x00c0, 0x3361: 0x00c0, 0x3362: 0x00c0, 0x3363: 0x00c0, + 0x3364: 0x00c0, 0x3365: 0x00c0, 0x3366: 0x00c0, 0x3367: 0x00c0, 0x3368: 0x00c0, 0x3369: 0x00c0, + 0x336a: 0x00c0, 0x336b: 0x00c0, 0x336c: 0x00c0, 0x336d: 0x00c0, 0x336e: 0x00c0, 0x336f: 0x00c0, + 0x3370: 0x00c0, 0x3371: 0x00c0, 0x3372: 0x00c0, 0x3373: 0x00c0, 0x3374: 0x00c0, 0x3375: 0x00c0, + 0x3376: 0x00c0, 0x3377: 0x00c0, 0x3378: 0x00c0, 0x3379: 0x00c0, 0x337a: 0x00c0, 0x337b: 0x00c0, + // Block 0xce, offset 0x3380 + 0x3380: 0x00c0, 0x3381: 0x00c0, 0x3382: 0x00c0, 0x3383: 0x00c0, 0x3384: 0x00c0, 0x3385: 0x00c0, + 0x3386: 0x00c0, 0x3387: 0x00c0, 0x3388: 0x00c0, 0x3389: 0x00c0, 0x338a: 0x00c0, 0x338b: 0x00c0, + 0x338c: 0x00c0, 0x338d: 0x00c0, 0x338e: 0x00c0, 0x338f: 0x00c0, 0x3390: 0x00c0, 0x3391: 0x00c0, + 0x3392: 0x00c0, 0x3393: 0x00c0, 0x3394: 0x00c0, 0x3395: 0x00c0, 0x3396: 0x00c0, 0x3397: 0x00c0, + 0x3398: 0x00c0, 0x3399: 0x00c0, 0x339a: 0x00c0, 0x339b: 0x00c0, 0x339c: 0x00c0, 0x339d: 0x00c0, + 0x339e: 0x00c0, 0x339f: 0x00c0, 0x33a0: 0x00c0, 0x33a1: 0x00c0, 0x33a2: 0x00c0, 0x33a3: 0x00c0, + 0x33a4: 0x00c0, 0x33a5: 0x00c0, 0x33a6: 0x00c0, 0x33a7: 0x00c0, + 0x33b0: 0x00c0, 0x33b1: 0x00c0, 0x33b2: 0x00c0, 0x33b3: 0x00c0, 0x33b4: 0x00c0, 0x33b5: 0x00c0, + 0x33b6: 0x00c0, 0x33b7: 0x00c0, 0x33b8: 0x00c0, 0x33b9: 0x00c0, 0x33ba: 0x00c0, 0x33bb: 0x00c0, + 0x33bc: 0x00c0, 0x33bd: 0x00c0, 0x33be: 0x00c0, 0x33bf: 0x00c0, + // Block 0xcf, offset 0x33c0 + 0x33c0: 0x00c0, 0x33c1: 0x00c0, 0x33c2: 0x00c0, 0x33c3: 0x00c0, 0x33c4: 0x00c0, 0x33c5: 0x00c0, + 0x33c6: 0x00c0, 0x33c7: 0x00c0, 0x33c8: 0x00c0, 0x33c9: 0x00c0, 0x33ca: 0x00c0, 0x33cb: 0x00c0, + 0x33cc: 0x00c0, 0x33cd: 0x00c0, 0x33ce: 0x00c0, 0x33cf: 0x00c0, 0x33d0: 0x00c0, 0x33d1: 0x00c0, + 0x33d2: 0x00c0, 0x33d3: 0x00c0, 0x33d4: 0x00c0, 0x33d5: 0x00c0, 0x33d6: 0x00c0, 0x33d7: 0x00c0, + 0x33d8: 0x00c0, 0x33d9: 0x00c0, 0x33da: 0x00c0, 0x33db: 0x00c0, 0x33dc: 0x00c0, 0x33dd: 0x00c0, + 0x33de: 0x00c0, 0x33df: 0x00c0, 0x33e0: 0x00c0, 0x33e1: 0x00c0, 0x33e2: 0x00c0, 0x33e3: 0x00c0, + 0x33ef: 0x0080, + // Block 0xd0, offset 0x3400 + 0x3400: 0x00c0, 0x3401: 0x00c0, 0x3402: 0x00c0, 0x3403: 0x00c0, 0x3404: 0x00c0, 0x3405: 0x00c0, + 0x3406: 0x00c0, 0x3407: 0x00c0, 0x3408: 0x00c0, 0x3409: 0x00c0, 0x340a: 0x00c0, 0x340b: 0x00c0, + 0x340c: 0x00c0, 0x340d: 0x00c0, 0x340e: 0x00c0, 0x340f: 0x00c0, 0x3410: 0x00c0, 0x3411: 0x00c0, + 0x3412: 0x00c0, 0x3413: 0x00c0, 0x3414: 0x00c0, 0x3415: 0x00c0, 0x3416: 0x00c0, 0x3417: 0x00c0, + 0x3418: 0x00c0, 0x3419: 0x00c0, 0x341a: 0x00c0, 0x341b: 0x00c0, 0x341c: 0x00c0, 0x341d: 0x00c0, + 0x341e: 0x00c0, 0x341f: 0x00c0, 0x3420: 0x00c0, 0x3421: 0x00c0, 0x3422: 0x00c0, 0x3423: 0x00c0, + 0x3424: 0x00c0, 0x3425: 0x00c0, 0x3426: 0x00c0, 0x3427: 0x00c0, 0x3428: 0x00c0, 0x3429: 0x00c0, + 
0x342a: 0x00c0, 0x342b: 0x00c0, 0x342c: 0x00c0, 0x342d: 0x00c0, 0x342e: 0x00c0, 0x342f: 0x00c0, + 0x3430: 0x00c0, 0x3431: 0x00c0, 0x3432: 0x00c0, 0x3433: 0x00c0, 0x3434: 0x00c0, 0x3435: 0x00c0, + 0x3436: 0x00c0, + // Block 0xd1, offset 0x3440 + 0x3440: 0x00c0, 0x3441: 0x00c0, 0x3442: 0x00c0, 0x3443: 0x00c0, 0x3444: 0x00c0, 0x3445: 0x00c0, + 0x3446: 0x00c0, 0x3447: 0x00c0, 0x3448: 0x00c0, 0x3449: 0x00c0, 0x344a: 0x00c0, 0x344b: 0x00c0, + 0x344c: 0x00c0, 0x344d: 0x00c0, 0x344e: 0x00c0, 0x344f: 0x00c0, 0x3450: 0x00c0, 0x3451: 0x00c0, + 0x3452: 0x00c0, 0x3453: 0x00c0, 0x3454: 0x00c0, 0x3455: 0x00c0, + 0x3460: 0x00c0, 0x3461: 0x00c0, 0x3462: 0x00c0, 0x3463: 0x00c0, + 0x3464: 0x00c0, 0x3465: 0x00c0, 0x3466: 0x00c0, 0x3467: 0x00c0, + // Block 0xd2, offset 0x3480 + 0x3480: 0x00c0, 0x3481: 0x00c0, 0x3482: 0x00c0, 0x3483: 0x00c0, 0x3484: 0x00c0, 0x3485: 0x00c0, + 0x3488: 0x00c0, 0x348a: 0x00c0, 0x348b: 0x00c0, + 0x348c: 0x00c0, 0x348d: 0x00c0, 0x348e: 0x00c0, 0x348f: 0x00c0, 0x3490: 0x00c0, 0x3491: 0x00c0, + 0x3492: 0x00c0, 0x3493: 0x00c0, 0x3494: 0x00c0, 0x3495: 0x00c0, 0x3496: 0x00c0, 0x3497: 0x00c0, + 0x3498: 0x00c0, 0x3499: 0x00c0, 0x349a: 0x00c0, 0x349b: 0x00c0, 0x349c: 0x00c0, 0x349d: 0x00c0, + 0x349e: 0x00c0, 0x349f: 0x00c0, 0x34a0: 0x00c0, 0x34a1: 0x00c0, 0x34a2: 0x00c0, 0x34a3: 0x00c0, + 0x34a4: 0x00c0, 0x34a5: 0x00c0, 0x34a6: 0x00c0, 0x34a7: 0x00c0, 0x34a8: 0x00c0, 0x34a9: 0x00c0, + 0x34aa: 0x00c0, 0x34ab: 0x00c0, 0x34ac: 0x00c0, 0x34ad: 0x00c0, 0x34ae: 0x00c0, 0x34af: 0x00c0, + 0x34b0: 0x00c0, 0x34b1: 0x00c0, 0x34b2: 0x00c0, 0x34b3: 0x00c0, 0x34b4: 0x00c0, 0x34b5: 0x00c0, + 0x34b7: 0x00c0, 0x34b8: 0x00c0, + 0x34bc: 0x00c0, 0x34bf: 0x00c0, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x00c0, 0x34c1: 0x00c0, 0x34c2: 0x00c0, 0x34c3: 0x00c0, 0x34c4: 0x00c0, 0x34c5: 0x00c0, + 0x34c6: 0x00c0, 0x34c7: 0x00c0, 0x34c8: 0x00c0, 0x34c9: 0x00c0, 0x34ca: 0x00c0, 0x34cb: 0x00c0, + 0x34cc: 0x00c0, 0x34cd: 0x00c0, 0x34ce: 0x00c0, 0x34cf: 0x00c0, 0x34d0: 0x00c0, 0x34d1: 0x00c0, + 0x34d2: 0x00c0, 0x34d3: 0x00c0, 0x34d4: 0x00c0, 0x34d5: 0x00c0, 0x34d7: 0x0080, + 0x34d8: 0x0080, 0x34d9: 0x0080, 0x34da: 0x0080, 0x34db: 0x0080, 0x34dc: 0x0080, 0x34dd: 0x0080, + 0x34de: 0x0080, 0x34df: 0x0080, 0x34e0: 0x00c0, 0x34e1: 0x00c0, 0x34e2: 0x00c0, 0x34e3: 0x00c0, + 0x34e4: 0x00c0, 0x34e5: 0x00c0, 0x34e6: 0x00c0, 0x34e7: 0x00c0, 0x34e8: 0x00c0, 0x34e9: 0x00c0, + 0x34ea: 0x00c0, 0x34eb: 0x00c0, 0x34ec: 0x00c0, 0x34ed: 0x00c0, 0x34ee: 0x00c0, 0x34ef: 0x00c0, + 0x34f0: 0x00c0, 0x34f1: 0x00c0, 0x34f2: 0x00c0, 0x34f3: 0x00c0, 0x34f4: 0x00c0, 0x34f5: 0x00c0, + 0x34f6: 0x00c0, 0x34f7: 0x0080, 0x34f8: 0x0080, 0x34f9: 0x0080, 0x34fa: 0x0080, 0x34fb: 0x0080, + 0x34fc: 0x0080, 0x34fd: 0x0080, 0x34fe: 0x0080, 0x34ff: 0x0080, + // Block 0xd4, offset 0x3500 + 0x3500: 0x00c0, 0x3501: 0x00c0, 0x3502: 0x00c0, 0x3503: 0x00c0, 0x3504: 0x00c0, 0x3505: 0x00c0, + 0x3506: 0x00c0, 0x3507: 0x00c0, 0x3508: 0x00c0, 0x3509: 0x00c0, 0x350a: 0x00c0, 0x350b: 0x00c0, + 0x350c: 0x00c0, 0x350d: 0x00c0, 0x350e: 0x00c0, 0x350f: 0x00c0, 0x3510: 0x00c0, 0x3511: 0x00c0, + 0x3512: 0x00c0, 0x3513: 0x00c0, 0x3514: 0x00c0, 0x3515: 0x00c0, 0x3516: 0x00c0, 0x3517: 0x00c0, + 0x3518: 0x00c0, 0x3519: 0x00c0, 0x351a: 0x00c0, 0x351b: 0x00c0, 0x351c: 0x00c0, 0x351d: 0x00c0, + 0x351e: 0x00c0, + 0x3527: 0x0080, 0x3528: 0x0080, 0x3529: 0x0080, + 0x352a: 0x0080, 0x352b: 0x0080, 0x352c: 0x0080, 0x352d: 0x0080, 0x352e: 0x0080, 0x352f: 0x0080, + // Block 0xd5, offset 0x3540 + 0x3560: 0x00c0, 0x3561: 0x00c0, 0x3562: 0x00c0, 0x3563: 0x00c0, + 0x3564: 0x00c0, 0x3565: 0x00c0, 0x3566: 
0x00c0, 0x3567: 0x00c0, 0x3568: 0x00c0, 0x3569: 0x00c0, + 0x356a: 0x00c0, 0x356b: 0x00c0, 0x356c: 0x00c0, 0x356d: 0x00c0, 0x356e: 0x00c0, 0x356f: 0x00c0, + 0x3570: 0x00c0, 0x3571: 0x00c0, 0x3572: 0x00c0, 0x3574: 0x00c0, 0x3575: 0x00c0, + 0x357b: 0x0080, + 0x357c: 0x0080, 0x357d: 0x0080, 0x357e: 0x0080, 0x357f: 0x0080, + // Block 0xd6, offset 0x3580 + 0x3580: 0x00c0, 0x3581: 0x00c0, 0x3582: 0x00c0, 0x3583: 0x00c0, 0x3584: 0x00c0, 0x3585: 0x00c0, + 0x3586: 0x00c0, 0x3587: 0x00c0, 0x3588: 0x00c0, 0x3589: 0x00c0, 0x358a: 0x00c0, 0x358b: 0x00c0, + 0x358c: 0x00c0, 0x358d: 0x00c0, 0x358e: 0x00c0, 0x358f: 0x00c0, 0x3590: 0x00c0, 0x3591: 0x00c0, + 0x3592: 0x00c0, 0x3593: 0x00c0, 0x3594: 0x00c0, 0x3595: 0x00c0, 0x3596: 0x0080, 0x3597: 0x0080, + 0x3598: 0x0080, 0x3599: 0x0080, 0x359a: 0x0080, 0x359b: 0x0080, + 0x359f: 0x0080, 0x35a0: 0x00c0, 0x35a1: 0x00c0, 0x35a2: 0x00c0, 0x35a3: 0x00c0, + 0x35a4: 0x00c0, 0x35a5: 0x00c0, 0x35a6: 0x00c0, 0x35a7: 0x00c0, 0x35a8: 0x00c0, 0x35a9: 0x00c0, + 0x35aa: 0x00c0, 0x35ab: 0x00c0, 0x35ac: 0x00c0, 0x35ad: 0x00c0, 0x35ae: 0x00c0, 0x35af: 0x00c0, + 0x35b0: 0x00c0, 0x35b1: 0x00c0, 0x35b2: 0x00c0, 0x35b3: 0x00c0, 0x35b4: 0x00c0, 0x35b5: 0x00c0, + 0x35b6: 0x00c0, 0x35b7: 0x00c0, 0x35b8: 0x00c0, 0x35b9: 0x00c0, + 0x35bf: 0x0080, + // Block 0xd7, offset 0x35c0 + 0x35c0: 0x00c0, 0x35c1: 0x00c0, 0x35c2: 0x00c0, 0x35c3: 0x00c0, 0x35c4: 0x00c0, 0x35c5: 0x00c0, + 0x35c6: 0x00c0, 0x35c7: 0x00c0, 0x35c8: 0x00c0, 0x35c9: 0x00c0, 0x35ca: 0x00c0, 0x35cb: 0x00c0, + 0x35cc: 0x00c0, 0x35cd: 0x00c0, 0x35ce: 0x00c0, 0x35cf: 0x00c0, 0x35d0: 0x00c0, 0x35d1: 0x00c0, + 0x35d2: 0x00c0, 0x35d3: 0x00c0, 0x35d4: 0x00c0, 0x35d5: 0x00c0, 0x35d6: 0x00c0, 0x35d7: 0x00c0, + 0x35d8: 0x00c0, 0x35d9: 0x00c0, 0x35da: 0x00c0, 0x35db: 0x00c0, 0x35dc: 0x00c0, 0x35dd: 0x00c0, + 0x35de: 0x00c0, 0x35df: 0x00c0, 0x35e0: 0x00c0, 0x35e1: 0x00c0, 0x35e2: 0x00c0, 0x35e3: 0x00c0, + 0x35e4: 0x00c0, 0x35e5: 0x00c0, 0x35e6: 0x00c0, 0x35e7: 0x00c0, 0x35e8: 0x00c0, 0x35e9: 0x00c0, + 0x35ea: 0x00c0, 0x35eb: 0x00c0, 0x35ec: 0x00c0, 0x35ed: 0x00c0, 0x35ee: 0x00c0, 0x35ef: 0x00c0, + 0x35f0: 0x00c0, 0x35f1: 0x00c0, 0x35f2: 0x00c0, 0x35f3: 0x00c0, 0x35f4: 0x00c0, 0x35f5: 0x00c0, + 0x35f6: 0x00c0, 0x35f7: 0x00c0, + 0x35fc: 0x0080, 0x35fd: 0x0080, 0x35fe: 0x00c0, 0x35ff: 0x00c0, + // Block 0xd8, offset 0x3600 + 0x3600: 0x00c0, 0x3601: 0x00c3, 0x3602: 0x00c3, 0x3603: 0x00c3, 0x3605: 0x00c3, + 0x3606: 0x00c3, + 0x360c: 0x00c3, 0x360d: 0x00c3, 0x360e: 0x00c3, 0x360f: 0x00c3, 0x3610: 0x00c0, 0x3611: 0x00c0, + 0x3612: 0x00c0, 0x3613: 0x00c0, 0x3615: 0x00c0, 0x3616: 0x00c0, 0x3617: 0x00c0, + 0x3619: 0x00c0, 0x361a: 0x00c0, 0x361b: 0x00c0, 0x361c: 0x00c0, 0x361d: 0x00c0, + 0x361e: 0x00c0, 0x361f: 0x00c0, 0x3620: 0x00c0, 0x3621: 0x00c0, 0x3622: 0x00c0, 0x3623: 0x00c0, + 0x3624: 0x00c0, 0x3625: 0x00c0, 0x3626: 0x00c0, 0x3627: 0x00c0, 0x3628: 0x00c0, 0x3629: 0x00c0, + 0x362a: 0x00c0, 0x362b: 0x00c0, 0x362c: 0x00c0, 0x362d: 0x00c0, 0x362e: 0x00c0, 0x362f: 0x00c0, + 0x3630: 0x00c0, 0x3631: 0x00c0, 0x3632: 0x00c0, 0x3633: 0x00c0, + 0x3638: 0x00c3, 0x3639: 0x00c3, 0x363a: 0x00c3, + 0x363f: 0x00c6, + // Block 0xd9, offset 0x3640 + 0x3640: 0x0080, 0x3641: 0x0080, 0x3642: 0x0080, 0x3643: 0x0080, 0x3644: 0x0080, 0x3645: 0x0080, + 0x3646: 0x0080, 0x3647: 0x0080, + 0x3650: 0x0080, 0x3651: 0x0080, + 0x3652: 0x0080, 0x3653: 0x0080, 0x3654: 0x0080, 0x3655: 0x0080, 0x3656: 0x0080, 0x3657: 0x0080, + 0x3658: 0x0080, + 0x3660: 0x00c0, 0x3661: 0x00c0, 0x3662: 0x00c0, 0x3663: 0x00c0, + 0x3664: 0x00c0, 0x3665: 0x00c0, 0x3666: 0x00c0, 0x3667: 0x00c0, 
0x3668: 0x00c0, 0x3669: 0x00c0, + 0x366a: 0x00c0, 0x366b: 0x00c0, 0x366c: 0x00c0, 0x366d: 0x00c0, 0x366e: 0x00c0, 0x366f: 0x00c0, + 0x3670: 0x00c0, 0x3671: 0x00c0, 0x3672: 0x00c0, 0x3673: 0x00c0, 0x3674: 0x00c0, 0x3675: 0x00c0, + 0x3676: 0x00c0, 0x3677: 0x00c0, 0x3678: 0x00c0, 0x3679: 0x00c0, 0x367a: 0x00c0, 0x367b: 0x00c0, + 0x367c: 0x00c0, 0x367d: 0x0080, 0x367e: 0x0080, 0x367f: 0x0080, + // Block 0xda, offset 0x3680 + 0x3680: 0x00c0, 0x3681: 0x00c0, 0x3682: 0x00c0, 0x3683: 0x00c0, 0x3684: 0x00c0, 0x3685: 0x00c0, + 0x3686: 0x00c0, 0x3687: 0x00c0, 0x3688: 0x00c0, 0x3689: 0x00c0, 0x368a: 0x00c0, 0x368b: 0x00c0, + 0x368c: 0x00c0, 0x368d: 0x00c0, 0x368e: 0x00c0, 0x368f: 0x00c0, 0x3690: 0x00c0, 0x3691: 0x00c0, + 0x3692: 0x00c0, 0x3693: 0x00c0, 0x3694: 0x00c0, 0x3695: 0x00c0, 0x3696: 0x00c0, 0x3697: 0x00c0, + 0x3698: 0x00c0, 0x3699: 0x00c0, 0x369a: 0x00c0, 0x369b: 0x00c0, 0x369c: 0x00c0, 0x369d: 0x0080, + 0x369e: 0x0080, 0x369f: 0x0080, + // Block 0xdb, offset 0x36c0 + 0x36c0: 0x00c2, 0x36c1: 0x00c2, 0x36c2: 0x00c2, 0x36c3: 0x00c2, 0x36c4: 0x00c2, 0x36c5: 0x00c4, + 0x36c6: 0x00c0, 0x36c7: 0x00c4, 0x36c8: 0x0080, 0x36c9: 0x00c4, 0x36ca: 0x00c4, 0x36cb: 0x00c0, + 0x36cc: 0x00c0, 0x36cd: 0x00c1, 0x36ce: 0x00c4, 0x36cf: 0x00c4, 0x36d0: 0x00c4, 0x36d1: 0x00c4, + 0x36d2: 0x00c4, 0x36d3: 0x00c2, 0x36d4: 0x00c2, 0x36d5: 0x00c2, 0x36d6: 0x00c2, 0x36d7: 0x00c1, + 0x36d8: 0x00c2, 0x36d9: 0x00c2, 0x36da: 0x00c2, 0x36db: 0x00c2, 0x36dc: 0x00c2, 0x36dd: 0x00c4, + 0x36de: 0x00c2, 0x36df: 0x00c2, 0x36e0: 0x00c2, 0x36e1: 0x00c4, 0x36e2: 0x00c0, 0x36e3: 0x00c0, + 0x36e4: 0x00c4, 0x36e5: 0x00c3, 0x36e6: 0x00c3, + 0x36eb: 0x0082, 0x36ec: 0x0082, 0x36ed: 0x0082, 0x36ee: 0x0082, 0x36ef: 0x0084, + 0x36f0: 0x0080, 0x36f1: 0x0080, 0x36f2: 0x0080, 0x36f3: 0x0080, 0x36f4: 0x0080, 0x36f5: 0x0080, + 0x36f6: 0x0080, + // Block 0xdc, offset 0x3700 + 0x3700: 0x00c0, 0x3701: 0x00c0, 0x3702: 0x00c0, 0x3703: 0x00c0, 0x3704: 0x00c0, 0x3705: 0x00c0, + 0x3706: 0x00c0, 0x3707: 0x00c0, 0x3708: 0x00c0, 0x3709: 0x00c0, 0x370a: 0x00c0, 0x370b: 0x00c0, + 0x370c: 0x00c0, 0x370d: 0x00c0, 0x370e: 0x00c0, 0x370f: 0x00c0, 0x3710: 0x00c0, 0x3711: 0x00c0, + 0x3712: 0x00c0, 0x3713: 0x00c0, 0x3714: 0x00c0, 0x3715: 0x00c0, 0x3716: 0x00c0, 0x3717: 0x00c0, + 0x3718: 0x00c0, 0x3719: 0x00c0, 0x371a: 0x00c0, 0x371b: 0x00c0, 0x371c: 0x00c0, 0x371d: 0x00c0, + 0x371e: 0x00c0, 0x371f: 0x00c0, 0x3720: 0x00c0, 0x3721: 0x00c0, 0x3722: 0x00c0, 0x3723: 0x00c0, + 0x3724: 0x00c0, 0x3725: 0x00c0, 0x3726: 0x00c0, 0x3727: 0x00c0, 0x3728: 0x00c0, 0x3729: 0x00c0, + 0x372a: 0x00c0, 0x372b: 0x00c0, 0x372c: 0x00c0, 0x372d: 0x00c0, 0x372e: 0x00c0, 0x372f: 0x00c0, + 0x3730: 0x00c0, 0x3731: 0x00c0, 0x3732: 0x00c0, 0x3733: 0x00c0, 0x3734: 0x00c0, 0x3735: 0x00c0, + 0x3739: 0x0080, 0x373a: 0x0080, 0x373b: 0x0080, + 0x373c: 0x0080, 0x373d: 0x0080, 0x373e: 0x0080, 0x373f: 0x0080, + // Block 0xdd, offset 0x3740 + 0x3740: 0x00c0, 0x3741: 0x00c0, 0x3742: 0x00c0, 0x3743: 0x00c0, 0x3744: 0x00c0, 0x3745: 0x00c0, + 0x3746: 0x00c0, 0x3747: 0x00c0, 0x3748: 0x00c0, 0x3749: 0x00c0, 0x374a: 0x00c0, 0x374b: 0x00c0, + 0x374c: 0x00c0, 0x374d: 0x00c0, 0x374e: 0x00c0, 0x374f: 0x00c0, 0x3750: 0x00c0, 0x3751: 0x00c0, + 0x3752: 0x00c0, 0x3753: 0x00c0, 0x3754: 0x00c0, 0x3755: 0x00c0, + 0x3758: 0x0080, 0x3759: 0x0080, 0x375a: 0x0080, 0x375b: 0x0080, 0x375c: 0x0080, 0x375d: 0x0080, + 0x375e: 0x0080, 0x375f: 0x0080, 0x3760: 0x00c0, 0x3761: 0x00c0, 0x3762: 0x00c0, 0x3763: 0x00c0, + 0x3764: 0x00c0, 0x3765: 0x00c0, 0x3766: 0x00c0, 0x3767: 0x00c0, 0x3768: 0x00c0, 0x3769: 0x00c0, + 0x376a: 0x00c0, 0x376b: 
0x00c0, 0x376c: 0x00c0, 0x376d: 0x00c0, 0x376e: 0x00c0, 0x376f: 0x00c0, + 0x3770: 0x00c0, 0x3771: 0x00c0, 0x3772: 0x00c0, + 0x3778: 0x0080, 0x3779: 0x0080, 0x377a: 0x0080, 0x377b: 0x0080, + 0x377c: 0x0080, 0x377d: 0x0080, 0x377e: 0x0080, 0x377f: 0x0080, + // Block 0xde, offset 0x3780 + 0x3780: 0x00c2, 0x3781: 0x00c4, 0x3782: 0x00c2, 0x3783: 0x00c4, 0x3784: 0x00c4, 0x3785: 0x00c4, + 0x3786: 0x00c2, 0x3787: 0x00c2, 0x3788: 0x00c2, 0x3789: 0x00c4, 0x378a: 0x00c2, 0x378b: 0x00c2, + 0x378c: 0x00c4, 0x378d: 0x00c2, 0x378e: 0x00c4, 0x378f: 0x00c4, 0x3790: 0x00c2, 0x3791: 0x00c4, + 0x3799: 0x0080, 0x379a: 0x0080, 0x379b: 0x0080, 0x379c: 0x0080, + 0x37a9: 0x0084, + 0x37aa: 0x0084, 0x37ab: 0x0084, 0x37ac: 0x0084, 0x37ad: 0x0082, 0x37ae: 0x0082, 0x37af: 0x0080, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x00c0, 0x37c1: 0x00c0, 0x37c2: 0x00c0, 0x37c3: 0x00c0, 0x37c4: 0x00c0, 0x37c5: 0x00c0, + 0x37c6: 0x00c0, 0x37c7: 0x00c0, 0x37c8: 0x00c0, 0x37c9: 0x00c0, 0x37ca: 0x00c0, 0x37cb: 0x00c0, + 0x37cc: 0x00c0, 0x37cd: 0x00c0, 0x37ce: 0x00c0, 0x37cf: 0x00c0, 0x37d0: 0x00c0, 0x37d1: 0x00c0, + 0x37d2: 0x00c0, 0x37d3: 0x00c0, 0x37d4: 0x00c0, 0x37d5: 0x00c0, 0x37d6: 0x00c0, 0x37d7: 0x00c0, + 0x37d8: 0x00c0, 0x37d9: 0x00c0, 0x37da: 0x00c0, 0x37db: 0x00c0, 0x37dc: 0x00c0, 0x37dd: 0x00c0, + 0x37de: 0x00c0, 0x37df: 0x00c0, 0x37e0: 0x00c0, 0x37e1: 0x00c0, 0x37e2: 0x00c0, 0x37e3: 0x00c0, + 0x37e4: 0x00c0, 0x37e5: 0x00c0, 0x37e6: 0x00c0, 0x37e7: 0x00c0, 0x37e8: 0x00c0, 0x37e9: 0x00c0, + 0x37ea: 0x00c0, 0x37eb: 0x00c0, 0x37ec: 0x00c0, 0x37ed: 0x00c0, 0x37ee: 0x00c0, 0x37ef: 0x00c0, + 0x37f0: 0x00c0, 0x37f1: 0x00c0, 0x37f2: 0x00c0, + // Block 0xe0, offset 0x3800 + 0x3800: 0x00c0, 0x3801: 0x00c0, 0x3802: 0x00c0, 0x3803: 0x00c0, 0x3804: 0x00c0, 0x3805: 0x00c0, + 0x3806: 0x00c0, 0x3807: 0x00c0, 0x3808: 0x00c0, 0x3809: 0x00c0, 0x380a: 0x00c0, 0x380b: 0x00c0, + 0x380c: 0x00c0, 0x380d: 0x00c0, 0x380e: 0x00c0, 0x380f: 0x00c0, 0x3810: 0x00c0, 0x3811: 0x00c0, + 0x3812: 0x00c0, 0x3813: 0x00c0, 0x3814: 0x00c0, 0x3815: 0x00c0, 0x3816: 0x00c0, 0x3817: 0x00c0, + 0x3818: 0x00c0, 0x3819: 0x00c0, 0x381a: 0x00c0, 0x381b: 0x00c0, 0x381c: 0x00c0, 0x381d: 0x00c0, + 0x381e: 0x00c0, 0x381f: 0x00c0, 0x3820: 0x00c0, 0x3821: 0x00c0, 0x3822: 0x00c0, 0x3823: 0x00c0, + 0x3824: 0x00c0, 0x3825: 0x00c0, 0x3826: 0x00c0, 0x3827: 0x00c0, 0x3828: 0x00c0, 0x3829: 0x00c0, + 0x382a: 0x00c0, 0x382b: 0x00c0, 0x382c: 0x00c0, 0x382d: 0x00c0, 0x382e: 0x00c0, 0x382f: 0x00c0, + 0x3830: 0x00c0, 0x3831: 0x00c0, 0x3832: 0x00c0, + 0x383a: 0x0080, 0x383b: 0x0080, + 0x383c: 0x0080, 0x383d: 0x0080, 0x383e: 0x0080, 0x383f: 0x0080, + // Block 0xe1, offset 0x3840 + 0x3860: 0x0080, 0x3861: 0x0080, 0x3862: 0x0080, 0x3863: 0x0080, + 0x3864: 0x0080, 0x3865: 0x0080, 0x3866: 0x0080, 0x3867: 0x0080, 0x3868: 0x0080, 0x3869: 0x0080, + 0x386a: 0x0080, 0x386b: 0x0080, 0x386c: 0x0080, 0x386d: 0x0080, 0x386e: 0x0080, 0x386f: 0x0080, + 0x3870: 0x0080, 0x3871: 0x0080, 0x3872: 0x0080, 0x3873: 0x0080, 0x3874: 0x0080, 0x3875: 0x0080, + 0x3876: 0x0080, 0x3877: 0x0080, 0x3878: 0x0080, 0x3879: 0x0080, 0x387a: 0x0080, 0x387b: 0x0080, + 0x387c: 0x0080, 0x387d: 0x0080, 0x387e: 0x0080, + // Block 0xe2, offset 0x3880 + 0x3880: 0x00c0, 0x3881: 0x00c3, 0x3882: 0x00c0, 0x3883: 0x00c0, 0x3884: 0x00c0, 0x3885: 0x00c0, + 0x3886: 0x00c0, 0x3887: 0x00c0, 0x3888: 0x00c0, 0x3889: 0x00c0, 0x388a: 0x00c0, 0x388b: 0x00c0, + 0x388c: 0x00c0, 0x388d: 0x00c0, 0x388e: 0x00c0, 0x388f: 0x00c0, 0x3890: 0x00c0, 0x3891: 0x00c0, + 0x3892: 0x00c0, 0x3893: 0x00c0, 0x3894: 0x00c0, 0x3895: 0x00c0, 0x3896: 0x00c0, 0x3897: 0x00c0, + 
0x3898: 0x00c0, 0x3899: 0x00c0, 0x389a: 0x00c0, 0x389b: 0x00c0, 0x389c: 0x00c0, 0x389d: 0x00c0, + 0x389e: 0x00c0, 0x389f: 0x00c0, 0x38a0: 0x00c0, 0x38a1: 0x00c0, 0x38a2: 0x00c0, 0x38a3: 0x00c0, + 0x38a4: 0x00c0, 0x38a5: 0x00c0, 0x38a6: 0x00c0, 0x38a7: 0x00c0, 0x38a8: 0x00c0, 0x38a9: 0x00c0, + 0x38aa: 0x00c0, 0x38ab: 0x00c0, 0x38ac: 0x00c0, 0x38ad: 0x00c0, 0x38ae: 0x00c0, 0x38af: 0x00c0, + 0x38b0: 0x00c0, 0x38b1: 0x00c0, 0x38b2: 0x00c0, 0x38b3: 0x00c0, 0x38b4: 0x00c0, 0x38b5: 0x00c0, + 0x38b6: 0x00c0, 0x38b7: 0x00c0, 0x38b8: 0x00c3, 0x38b9: 0x00c3, 0x38ba: 0x00c3, 0x38bb: 0x00c3, + 0x38bc: 0x00c3, 0x38bd: 0x00c3, 0x38be: 0x00c3, 0x38bf: 0x00c3, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x00c3, 0x38c1: 0x00c3, 0x38c2: 0x00c3, 0x38c3: 0x00c3, 0x38c4: 0x00c3, 0x38c5: 0x00c3, + 0x38c6: 0x00c6, 0x38c7: 0x0080, 0x38c8: 0x0080, 0x38c9: 0x0080, 0x38ca: 0x0080, 0x38cb: 0x0080, + 0x38cc: 0x0080, 0x38cd: 0x0080, + 0x38d2: 0x0080, 0x38d3: 0x0080, 0x38d4: 0x0080, 0x38d5: 0x0080, 0x38d6: 0x0080, 0x38d7: 0x0080, + 0x38d8: 0x0080, 0x38d9: 0x0080, 0x38da: 0x0080, 0x38db: 0x0080, 0x38dc: 0x0080, 0x38dd: 0x0080, + 0x38de: 0x0080, 0x38df: 0x0080, 0x38e0: 0x0080, 0x38e1: 0x0080, 0x38e2: 0x0080, 0x38e3: 0x0080, + 0x38e4: 0x0080, 0x38e5: 0x0080, 0x38e6: 0x00c0, 0x38e7: 0x00c0, 0x38e8: 0x00c0, 0x38e9: 0x00c0, + 0x38ea: 0x00c0, 0x38eb: 0x00c0, 0x38ec: 0x00c0, 0x38ed: 0x00c0, 0x38ee: 0x00c0, 0x38ef: 0x00c0, + 0x38ff: 0x00c6, + // Block 0xe4, offset 0x3900 + 0x3900: 0x00c3, 0x3901: 0x00c3, 0x3902: 0x00c0, 0x3903: 0x00c0, 0x3904: 0x00c0, 0x3905: 0x00c0, + 0x3906: 0x00c0, 0x3907: 0x00c0, 0x3908: 0x00c0, 0x3909: 0x00c0, 0x390a: 0x00c0, 0x390b: 0x00c0, + 0x390c: 0x00c0, 0x390d: 0x00c0, 0x390e: 0x00c0, 0x390f: 0x00c0, 0x3910: 0x00c0, 0x3911: 0x00c0, + 0x3912: 0x00c0, 0x3913: 0x00c0, 0x3914: 0x00c0, 0x3915: 0x00c0, 0x3916: 0x00c0, 0x3917: 0x00c0, + 0x3918: 0x00c0, 0x3919: 0x00c0, 0x391a: 0x00c0, 0x391b: 0x00c0, 0x391c: 0x00c0, 0x391d: 0x00c0, + 0x391e: 0x00c0, 0x391f: 0x00c0, 0x3920: 0x00c0, 0x3921: 0x00c0, 0x3922: 0x00c0, 0x3923: 0x00c0, + 0x3924: 0x00c0, 0x3925: 0x00c0, 0x3926: 0x00c0, 0x3927: 0x00c0, 0x3928: 0x00c0, 0x3929: 0x00c0, + 0x392a: 0x00c0, 0x392b: 0x00c0, 0x392c: 0x00c0, 0x392d: 0x00c0, 0x392e: 0x00c0, 0x392f: 0x00c0, + 0x3930: 0x00c0, 0x3931: 0x00c0, 0x3932: 0x00c0, 0x3933: 0x00c3, 0x3934: 0x00c3, 0x3935: 0x00c3, + 0x3936: 0x00c3, 0x3937: 0x00c0, 0x3938: 0x00c0, 0x3939: 0x00c6, 0x393a: 0x00c3, 0x393b: 0x0080, + 0x393c: 0x0080, 0x393d: 0x0040, 0x393e: 0x0080, 0x393f: 0x0080, + // Block 0xe5, offset 0x3940 + 0x3940: 0x0080, 0x3941: 0x0080, + 0x3950: 0x00c0, 0x3951: 0x00c0, + 0x3952: 0x00c0, 0x3953: 0x00c0, 0x3954: 0x00c0, 0x3955: 0x00c0, 0x3956: 0x00c0, 0x3957: 0x00c0, + 0x3958: 0x00c0, 0x3959: 0x00c0, 0x395a: 0x00c0, 0x395b: 0x00c0, 0x395c: 0x00c0, 0x395d: 0x00c0, + 0x395e: 0x00c0, 0x395f: 0x00c0, 0x3960: 0x00c0, 0x3961: 0x00c0, 0x3962: 0x00c0, 0x3963: 0x00c0, + 0x3964: 0x00c0, 0x3965: 0x00c0, 0x3966: 0x00c0, 0x3967: 0x00c0, 0x3968: 0x00c0, + 0x3970: 0x00c0, 0x3971: 0x00c0, 0x3972: 0x00c0, 0x3973: 0x00c0, 0x3974: 0x00c0, 0x3975: 0x00c0, + 0x3976: 0x00c0, 0x3977: 0x00c0, 0x3978: 0x00c0, 0x3979: 0x00c0, + // Block 0xe6, offset 0x3980 + 0x3980: 0x00c3, 0x3981: 0x00c3, 0x3982: 0x00c3, 0x3983: 0x00c0, 0x3984: 0x00c0, 0x3985: 0x00c0, + 0x3986: 0x00c0, 0x3987: 0x00c0, 0x3988: 0x00c0, 0x3989: 0x00c0, 0x398a: 0x00c0, 0x398b: 0x00c0, + 0x398c: 0x00c0, 0x398d: 0x00c0, 0x398e: 0x00c0, 0x398f: 0x00c0, 0x3990: 0x00c0, 0x3991: 0x00c0, + 0x3992: 0x00c0, 0x3993: 0x00c0, 0x3994: 0x00c0, 0x3995: 0x00c0, 0x3996: 0x00c0, 0x3997: 
0x00c0, + 0x3998: 0x00c0, 0x3999: 0x00c0, 0x399a: 0x00c0, 0x399b: 0x00c0, 0x399c: 0x00c0, 0x399d: 0x00c0, + 0x399e: 0x00c0, 0x399f: 0x00c0, 0x39a0: 0x00c0, 0x39a1: 0x00c0, 0x39a2: 0x00c0, 0x39a3: 0x00c0, + 0x39a4: 0x00c0, 0x39a5: 0x00c0, 0x39a6: 0x00c0, 0x39a7: 0x00c3, 0x39a8: 0x00c3, 0x39a9: 0x00c3, + 0x39aa: 0x00c3, 0x39ab: 0x00c3, 0x39ac: 0x00c0, 0x39ad: 0x00c3, 0x39ae: 0x00c3, 0x39af: 0x00c3, + 0x39b0: 0x00c3, 0x39b1: 0x00c3, 0x39b2: 0x00c3, 0x39b3: 0x00c6, 0x39b4: 0x00c6, + 0x39b6: 0x00c0, 0x39b7: 0x00c0, 0x39b8: 0x00c0, 0x39b9: 0x00c0, 0x39ba: 0x00c0, 0x39bb: 0x00c0, + 0x39bc: 0x00c0, 0x39bd: 0x00c0, 0x39be: 0x00c0, 0x39bf: 0x00c0, + // Block 0xe7, offset 0x39c0 + 0x39c0: 0x0080, 0x39c1: 0x0080, 0x39c2: 0x0080, 0x39c3: 0x0080, + 0x39d0: 0x00c0, 0x39d1: 0x00c0, + 0x39d2: 0x00c0, 0x39d3: 0x00c0, 0x39d4: 0x00c0, 0x39d5: 0x00c0, 0x39d6: 0x00c0, 0x39d7: 0x00c0, + 0x39d8: 0x00c0, 0x39d9: 0x00c0, 0x39da: 0x00c0, 0x39db: 0x00c0, 0x39dc: 0x00c0, 0x39dd: 0x00c0, + 0x39de: 0x00c0, 0x39df: 0x00c0, 0x39e0: 0x00c0, 0x39e1: 0x00c0, 0x39e2: 0x00c0, 0x39e3: 0x00c0, + 0x39e4: 0x00c0, 0x39e5: 0x00c0, 0x39e6: 0x00c0, 0x39e7: 0x00c0, 0x39e8: 0x00c0, 0x39e9: 0x00c0, + 0x39ea: 0x00c0, 0x39eb: 0x00c0, 0x39ec: 0x00c0, 0x39ed: 0x00c0, 0x39ee: 0x00c0, 0x39ef: 0x00c0, + 0x39f0: 0x00c0, 0x39f1: 0x00c0, 0x39f2: 0x00c0, 0x39f3: 0x00c3, 0x39f4: 0x0080, 0x39f5: 0x0080, + 0x39f6: 0x00c0, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x00c3, 0x3a01: 0x00c3, 0x3a02: 0x00c0, 0x3a03: 0x00c0, 0x3a04: 0x00c0, 0x3a05: 0x00c0, + 0x3a06: 0x00c0, 0x3a07: 0x00c0, 0x3a08: 0x00c0, 0x3a09: 0x00c0, 0x3a0a: 0x00c0, 0x3a0b: 0x00c0, + 0x3a0c: 0x00c0, 0x3a0d: 0x00c0, 0x3a0e: 0x00c0, 0x3a0f: 0x00c0, 0x3a10: 0x00c0, 0x3a11: 0x00c0, + 0x3a12: 0x00c0, 0x3a13: 0x00c0, 0x3a14: 0x00c0, 0x3a15: 0x00c0, 0x3a16: 0x00c0, 0x3a17: 0x00c0, + 0x3a18: 0x00c0, 0x3a19: 0x00c0, 0x3a1a: 0x00c0, 0x3a1b: 0x00c0, 0x3a1c: 0x00c0, 0x3a1d: 0x00c0, + 0x3a1e: 0x00c0, 0x3a1f: 0x00c0, 0x3a20: 0x00c0, 0x3a21: 0x00c0, 0x3a22: 0x00c0, 0x3a23: 0x00c0, + 0x3a24: 0x00c0, 0x3a25: 0x00c0, 0x3a26: 0x00c0, 0x3a27: 0x00c0, 0x3a28: 0x00c0, 0x3a29: 0x00c0, + 0x3a2a: 0x00c0, 0x3a2b: 0x00c0, 0x3a2c: 0x00c0, 0x3a2d: 0x00c0, 0x3a2e: 0x00c0, 0x3a2f: 0x00c0, + 0x3a30: 0x00c0, 0x3a31: 0x00c0, 0x3a32: 0x00c0, 0x3a33: 0x00c0, 0x3a34: 0x00c0, 0x3a35: 0x00c0, + 0x3a36: 0x00c3, 0x3a37: 0x00c3, 0x3a38: 0x00c3, 0x3a39: 0x00c3, 0x3a3a: 0x00c3, 0x3a3b: 0x00c3, + 0x3a3c: 0x00c3, 0x3a3d: 0x00c3, 0x3a3e: 0x00c3, 0x3a3f: 0x00c0, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x00c5, 0x3a41: 0x00c0, 0x3a42: 0x00c0, 0x3a43: 0x00c0, 0x3a44: 0x00c0, 0x3a45: 0x0080, + 0x3a46: 0x0080, 0x3a47: 0x0080, 0x3a48: 0x0080, 0x3a49: 0x0080, 0x3a4a: 0x00c3, 0x3a4b: 0x00c3, + 0x3a4c: 0x00c3, 0x3a4d: 0x0080, 0x3a50: 0x00c0, 0x3a51: 0x00c0, + 0x3a52: 0x00c0, 0x3a53: 0x00c0, 0x3a54: 0x00c0, 0x3a55: 0x00c0, 0x3a56: 0x00c0, 0x3a57: 0x00c0, + 0x3a58: 0x00c0, 0x3a59: 0x00c0, 0x3a5a: 0x00c0, 0x3a5b: 0x0080, 0x3a5c: 0x00c0, 0x3a5d: 0x0080, + 0x3a5e: 0x0080, 0x3a5f: 0x0080, 0x3a61: 0x0080, 0x3a62: 0x0080, 0x3a63: 0x0080, + 0x3a64: 0x0080, 0x3a65: 0x0080, 0x3a66: 0x0080, 0x3a67: 0x0080, 0x3a68: 0x0080, 0x3a69: 0x0080, + 0x3a6a: 0x0080, 0x3a6b: 0x0080, 0x3a6c: 0x0080, 0x3a6d: 0x0080, 0x3a6e: 0x0080, 0x3a6f: 0x0080, + 0x3a70: 0x0080, 0x3a71: 0x0080, 0x3a72: 0x0080, 0x3a73: 0x0080, 0x3a74: 0x0080, + // Block 0xea, offset 0x3a80 + 0x3a80: 0x00c0, 0x3a81: 0x00c0, 0x3a82: 0x00c0, 0x3a83: 0x00c0, 0x3a84: 0x00c0, 0x3a85: 0x00c0, + 0x3a86: 0x00c0, 0x3a87: 0x00c0, 0x3a88: 0x00c0, 0x3a89: 0x00c0, 0x3a8a: 0x00c0, 0x3a8b: 0x00c0, + 0x3a8c: 0x00c0, 
0x3a8d: 0x00c0, 0x3a8e: 0x00c0, 0x3a8f: 0x00c0, 0x3a90: 0x00c0, 0x3a91: 0x00c0, + 0x3a93: 0x00c0, 0x3a94: 0x00c0, 0x3a95: 0x00c0, 0x3a96: 0x00c0, 0x3a97: 0x00c0, + 0x3a98: 0x00c0, 0x3a99: 0x00c0, 0x3a9a: 0x00c0, 0x3a9b: 0x00c0, 0x3a9c: 0x00c0, 0x3a9d: 0x00c0, + 0x3a9e: 0x00c0, 0x3a9f: 0x00c0, 0x3aa0: 0x00c0, 0x3aa1: 0x00c0, 0x3aa2: 0x00c0, 0x3aa3: 0x00c0, + 0x3aa4: 0x00c0, 0x3aa5: 0x00c0, 0x3aa6: 0x00c0, 0x3aa7: 0x00c0, 0x3aa8: 0x00c0, 0x3aa9: 0x00c0, + 0x3aaa: 0x00c0, 0x3aab: 0x00c0, 0x3aac: 0x00c0, 0x3aad: 0x00c0, 0x3aae: 0x00c0, 0x3aaf: 0x00c3, + 0x3ab0: 0x00c3, 0x3ab1: 0x00c3, 0x3ab2: 0x00c0, 0x3ab3: 0x00c0, 0x3ab4: 0x00c3, 0x3ab5: 0x00c5, + 0x3ab6: 0x00c3, 0x3ab7: 0x00c3, 0x3ab8: 0x0080, 0x3ab9: 0x0080, 0x3aba: 0x0080, 0x3abb: 0x0080, + 0x3abc: 0x0080, 0x3abd: 0x0080, 0x3abe: 0x00c3, + // Block 0xeb, offset 0x3ac0 + 0x3ac0: 0x00c0, 0x3ac1: 0x00c0, 0x3ac2: 0x00c0, 0x3ac3: 0x00c0, 0x3ac4: 0x00c0, 0x3ac5: 0x00c0, + 0x3ac6: 0x00c0, 0x3ac8: 0x00c0, 0x3aca: 0x00c0, 0x3acb: 0x00c0, + 0x3acc: 0x00c0, 0x3acd: 0x00c0, 0x3acf: 0x00c0, 0x3ad0: 0x00c0, 0x3ad1: 0x00c0, + 0x3ad2: 0x00c0, 0x3ad3: 0x00c0, 0x3ad4: 0x00c0, 0x3ad5: 0x00c0, 0x3ad6: 0x00c0, 0x3ad7: 0x00c0, + 0x3ad8: 0x00c0, 0x3ad9: 0x00c0, 0x3ada: 0x00c0, 0x3adb: 0x00c0, 0x3adc: 0x00c0, 0x3add: 0x00c0, + 0x3adf: 0x00c0, 0x3ae0: 0x00c0, 0x3ae1: 0x00c0, 0x3ae2: 0x00c0, 0x3ae3: 0x00c0, + 0x3ae4: 0x00c0, 0x3ae5: 0x00c0, 0x3ae6: 0x00c0, 0x3ae7: 0x00c0, 0x3ae8: 0x00c0, 0x3ae9: 0x0080, + 0x3af0: 0x00c0, 0x3af1: 0x00c0, 0x3af2: 0x00c0, 0x3af3: 0x00c0, 0x3af4: 0x00c0, 0x3af5: 0x00c0, + 0x3af6: 0x00c0, 0x3af7: 0x00c0, 0x3af8: 0x00c0, 0x3af9: 0x00c0, 0x3afa: 0x00c0, 0x3afb: 0x00c0, + 0x3afc: 0x00c0, 0x3afd: 0x00c0, 0x3afe: 0x00c0, 0x3aff: 0x00c0, + // Block 0xec, offset 0x3b00 + 0x3b00: 0x00c0, 0x3b01: 0x00c0, 0x3b02: 0x00c0, 0x3b03: 0x00c0, 0x3b04: 0x00c0, 0x3b05: 0x00c0, + 0x3b06: 0x00c0, 0x3b07: 0x00c0, 0x3b08: 0x00c0, 0x3b09: 0x00c0, 0x3b0a: 0x00c0, 0x3b0b: 0x00c0, + 0x3b0c: 0x00c0, 0x3b0d: 0x00c0, 0x3b0e: 0x00c0, 0x3b0f: 0x00c0, 0x3b10: 0x00c0, 0x3b11: 0x00c0, + 0x3b12: 0x00c0, 0x3b13: 0x00c0, 0x3b14: 0x00c0, 0x3b15: 0x00c0, 0x3b16: 0x00c0, 0x3b17: 0x00c0, + 0x3b18: 0x00c0, 0x3b19: 0x00c0, 0x3b1a: 0x00c0, 0x3b1b: 0x00c0, 0x3b1c: 0x00c0, 0x3b1d: 0x00c0, + 0x3b1e: 0x00c0, 0x3b1f: 0x00c3, 0x3b20: 0x00c0, 0x3b21: 0x00c0, 0x3b22: 0x00c0, 0x3b23: 0x00c3, + 0x3b24: 0x00c3, 0x3b25: 0x00c3, 0x3b26: 0x00c3, 0x3b27: 0x00c3, 0x3b28: 0x00c3, 0x3b29: 0x00c3, + 0x3b2a: 0x00c6, + 0x3b30: 0x00c0, 0x3b31: 0x00c0, 0x3b32: 0x00c0, 0x3b33: 0x00c0, 0x3b34: 0x00c0, 0x3b35: 0x00c0, + 0x3b36: 0x00c0, 0x3b37: 0x00c0, 0x3b38: 0x00c0, 0x3b39: 0x00c0, + // Block 0xed, offset 0x3b40 + 0x3b40: 0x00c3, 0x3b41: 0x00c3, 0x3b42: 0x00c0, 0x3b43: 0x00c0, 0x3b45: 0x00c0, + 0x3b46: 0x00c0, 0x3b47: 0x00c0, 0x3b48: 0x00c0, 0x3b49: 0x00c0, 0x3b4a: 0x00c0, 0x3b4b: 0x00c0, + 0x3b4c: 0x00c0, 0x3b4f: 0x00c0, 0x3b50: 0x00c0, + 0x3b53: 0x00c0, 0x3b54: 0x00c0, 0x3b55: 0x00c0, 0x3b56: 0x00c0, 0x3b57: 0x00c0, + 0x3b58: 0x00c0, 0x3b59: 0x00c0, 0x3b5a: 0x00c0, 0x3b5b: 0x00c0, 0x3b5c: 0x00c0, 0x3b5d: 0x00c0, + 0x3b5e: 0x00c0, 0x3b5f: 0x00c0, 0x3b60: 0x00c0, 0x3b61: 0x00c0, 0x3b62: 0x00c0, 0x3b63: 0x00c0, + 0x3b64: 0x00c0, 0x3b65: 0x00c0, 0x3b66: 0x00c0, 0x3b67: 0x00c0, 0x3b68: 0x00c0, + 0x3b6a: 0x00c0, 0x3b6b: 0x00c0, 0x3b6c: 0x00c0, 0x3b6d: 0x00c0, 0x3b6e: 0x00c0, 0x3b6f: 0x00c0, + 0x3b70: 0x00c0, 0x3b72: 0x00c0, 0x3b73: 0x00c0, 0x3b75: 0x00c0, + 0x3b76: 0x00c0, 0x3b77: 0x00c0, 0x3b78: 0x00c0, 0x3b79: 0x00c0, + 0x3b7c: 0x00c3, 0x3b7d: 0x00c0, 0x3b7e: 0x00c0, 0x3b7f: 0x00c0, + // Block 0xee, offset 
0x3b80 + 0x3b80: 0x00c3, 0x3b81: 0x00c0, 0x3b82: 0x00c0, 0x3b83: 0x00c0, 0x3b84: 0x00c0, + 0x3b87: 0x00c0, 0x3b88: 0x00c0, 0x3b8b: 0x00c0, + 0x3b8c: 0x00c0, 0x3b8d: 0x00c5, 0x3b90: 0x00c0, + 0x3b97: 0x00c0, + 0x3b9d: 0x00c0, + 0x3b9e: 0x00c0, 0x3b9f: 0x00c0, 0x3ba0: 0x00c0, 0x3ba1: 0x00c0, 0x3ba2: 0x00c0, 0x3ba3: 0x00c0, + 0x3ba6: 0x00c3, 0x3ba7: 0x00c3, 0x3ba8: 0x00c3, 0x3ba9: 0x00c3, + 0x3baa: 0x00c3, 0x3bab: 0x00c3, 0x3bac: 0x00c3, + 0x3bb0: 0x00c3, 0x3bb1: 0x00c3, 0x3bb2: 0x00c3, 0x3bb3: 0x00c3, 0x3bb4: 0x00c3, + // Block 0xef, offset 0x3bc0 + 0x3bc0: 0x00c0, 0x3bc1: 0x00c0, 0x3bc2: 0x00c0, 0x3bc3: 0x00c0, 0x3bc4: 0x00c0, 0x3bc5: 0x00c0, + 0x3bc6: 0x00c0, 0x3bc7: 0x00c0, 0x3bc8: 0x00c0, 0x3bc9: 0x00c0, 0x3bca: 0x00c0, 0x3bcb: 0x00c0, + 0x3bcc: 0x00c0, 0x3bcd: 0x00c0, 0x3bce: 0x00c0, 0x3bcf: 0x00c0, 0x3bd0: 0x00c0, 0x3bd1: 0x00c0, + 0x3bd2: 0x00c0, 0x3bd3: 0x00c0, 0x3bd4: 0x00c0, 0x3bd5: 0x00c0, 0x3bd6: 0x00c0, 0x3bd7: 0x00c0, + 0x3bd8: 0x00c0, 0x3bd9: 0x00c0, 0x3bda: 0x00c0, 0x3bdb: 0x00c0, 0x3bdc: 0x00c0, 0x3bdd: 0x00c0, + 0x3bde: 0x00c0, 0x3bdf: 0x00c0, 0x3be0: 0x00c0, 0x3be1: 0x00c0, 0x3be2: 0x00c0, 0x3be3: 0x00c0, + 0x3be4: 0x00c0, 0x3be5: 0x00c0, 0x3be6: 0x00c0, 0x3be7: 0x00c0, 0x3be8: 0x00c0, 0x3be9: 0x00c0, + 0x3bea: 0x00c0, 0x3beb: 0x00c0, 0x3bec: 0x00c0, 0x3bed: 0x00c0, 0x3bee: 0x00c0, 0x3bef: 0x00c0, + 0x3bf0: 0x00c0, 0x3bf1: 0x00c0, 0x3bf2: 0x00c0, 0x3bf3: 0x00c0, 0x3bf4: 0x00c0, 0x3bf5: 0x00c0, + 0x3bf6: 0x00c0, 0x3bf7: 0x00c0, 0x3bf8: 0x00c3, 0x3bf9: 0x00c3, 0x3bfa: 0x00c3, 0x3bfb: 0x00c3, + 0x3bfc: 0x00c3, 0x3bfd: 0x00c3, 0x3bfe: 0x00c3, 0x3bff: 0x00c3, + // Block 0xf0, offset 0x3c00 + 0x3c00: 0x00c0, 0x3c01: 0x00c0, 0x3c02: 0x00c6, 0x3c03: 0x00c3, 0x3c04: 0x00c3, 0x3c05: 0x00c0, + 0x3c06: 0x00c3, 0x3c07: 0x00c0, 0x3c08: 0x00c0, 0x3c09: 0x00c0, 0x3c0a: 0x00c0, 0x3c0b: 0x0080, + 0x3c0c: 0x0080, 0x3c0d: 0x0080, 0x3c0e: 0x0080, 0x3c0f: 0x0080, 0x3c10: 0x00c0, 0x3c11: 0x00c0, + 0x3c12: 0x00c0, 0x3c13: 0x00c0, 0x3c14: 0x00c0, 0x3c15: 0x00c0, 0x3c16: 0x00c0, 0x3c17: 0x00c0, + 0x3c18: 0x00c0, 0x3c19: 0x00c0, 0x3c1b: 0x0080, 0x3c1d: 0x0080, + // Block 0xf1, offset 0x3c40 + 0x3c40: 0x00c0, 0x3c41: 0x00c0, 0x3c42: 0x00c0, 0x3c43: 0x00c0, 0x3c44: 0x00c0, 0x3c45: 0x00c0, + 0x3c46: 0x00c0, 0x3c47: 0x00c0, 0x3c48: 0x00c0, 0x3c49: 0x00c0, 0x3c4a: 0x00c0, 0x3c4b: 0x00c0, + 0x3c4c: 0x00c0, 0x3c4d: 0x00c0, 0x3c4e: 0x00c0, 0x3c4f: 0x00c0, 0x3c50: 0x00c0, 0x3c51: 0x00c0, + 0x3c52: 0x00c0, 0x3c53: 0x00c0, 0x3c54: 0x00c0, 0x3c55: 0x00c0, 0x3c56: 0x00c0, 0x3c57: 0x00c0, + 0x3c58: 0x00c0, 0x3c59: 0x00c0, 0x3c5a: 0x00c0, 0x3c5b: 0x00c0, 0x3c5c: 0x00c0, 0x3c5d: 0x00c0, + 0x3c5e: 0x00c0, 0x3c5f: 0x00c0, 0x3c60: 0x00c0, 0x3c61: 0x00c0, 0x3c62: 0x00c0, 0x3c63: 0x00c0, + 0x3c64: 0x00c0, 0x3c65: 0x00c0, 0x3c66: 0x00c0, 0x3c67: 0x00c0, 0x3c68: 0x00c0, 0x3c69: 0x00c0, + 0x3c6a: 0x00c0, 0x3c6b: 0x00c0, 0x3c6c: 0x00c0, 0x3c6d: 0x00c0, 0x3c6e: 0x00c0, 0x3c6f: 0x00c0, + 0x3c70: 0x00c0, 0x3c71: 0x00c0, 0x3c72: 0x00c0, 0x3c73: 0x00c3, 0x3c74: 0x00c3, 0x3c75: 0x00c3, + 0x3c76: 0x00c3, 0x3c77: 0x00c3, 0x3c78: 0x00c3, 0x3c79: 0x00c0, 0x3c7a: 0x00c3, 0x3c7b: 0x00c0, + 0x3c7c: 0x00c0, 0x3c7d: 0x00c0, 0x3c7e: 0x00c0, 0x3c7f: 0x00c3, + // Block 0xf2, offset 0x3c80 + 0x3c80: 0x00c3, 0x3c81: 0x00c0, 0x3c82: 0x00c6, 0x3c83: 0x00c3, 0x3c84: 0x00c0, 0x3c85: 0x00c0, + 0x3c86: 0x0080, 0x3c87: 0x00c0, + 0x3c90: 0x00c0, 0x3c91: 0x00c0, + 0x3c92: 0x00c0, 0x3c93: 0x00c0, 0x3c94: 0x00c0, 0x3c95: 0x00c0, 0x3c96: 0x00c0, 0x3c97: 0x00c0, + 0x3c98: 0x00c0, 0x3c99: 0x00c0, + // Block 0xf3, offset 0x3cc0 + 0x3cc0: 0x00c0, 0x3cc1: 
0x00c0, 0x3cc2: 0x00c0, 0x3cc3: 0x00c0, 0x3cc4: 0x00c0, 0x3cc5: 0x00c0, + 0x3cc6: 0x00c0, 0x3cc7: 0x00c0, 0x3cc8: 0x00c0, 0x3cc9: 0x00c0, 0x3cca: 0x00c0, 0x3ccb: 0x00c0, + 0x3ccc: 0x00c0, 0x3ccd: 0x00c0, 0x3cce: 0x00c0, 0x3ccf: 0x00c0, 0x3cd0: 0x00c0, 0x3cd1: 0x00c0, + 0x3cd2: 0x00c0, 0x3cd3: 0x00c0, 0x3cd4: 0x00c0, 0x3cd5: 0x00c0, 0x3cd6: 0x00c0, 0x3cd7: 0x00c0, + 0x3cd8: 0x00c0, 0x3cd9: 0x00c0, 0x3cda: 0x00c0, 0x3cdb: 0x00c0, 0x3cdc: 0x00c0, 0x3cdd: 0x00c0, + 0x3cde: 0x00c0, 0x3cdf: 0x00c0, 0x3ce0: 0x00c0, 0x3ce1: 0x00c0, 0x3ce2: 0x00c0, 0x3ce3: 0x00c0, + 0x3ce4: 0x00c0, 0x3ce5: 0x00c0, 0x3ce6: 0x00c0, 0x3ce7: 0x00c0, 0x3ce8: 0x00c0, 0x3ce9: 0x00c0, + 0x3cea: 0x00c0, 0x3ceb: 0x00c0, 0x3cec: 0x00c0, 0x3ced: 0x00c0, 0x3cee: 0x00c0, 0x3cef: 0x00c0, + 0x3cf0: 0x00c0, 0x3cf1: 0x00c0, 0x3cf2: 0x00c3, 0x3cf3: 0x00c3, 0x3cf4: 0x00c3, 0x3cf5: 0x00c3, + 0x3cf8: 0x00c0, 0x3cf9: 0x00c0, 0x3cfa: 0x00c0, 0x3cfb: 0x00c0, + 0x3cfc: 0x00c3, 0x3cfd: 0x00c3, 0x3cfe: 0x00c0, 0x3cff: 0x00c6, + // Block 0xf4, offset 0x3d00 + 0x3d00: 0x00c3, 0x3d01: 0x0080, 0x3d02: 0x0080, 0x3d03: 0x0080, 0x3d04: 0x0080, 0x3d05: 0x0080, + 0x3d06: 0x0080, 0x3d07: 0x0080, 0x3d08: 0x0080, 0x3d09: 0x0080, 0x3d0a: 0x0080, 0x3d0b: 0x0080, + 0x3d0c: 0x0080, 0x3d0d: 0x0080, 0x3d0e: 0x0080, 0x3d0f: 0x0080, 0x3d10: 0x0080, 0x3d11: 0x0080, + 0x3d12: 0x0080, 0x3d13: 0x0080, 0x3d14: 0x0080, 0x3d15: 0x0080, 0x3d16: 0x0080, 0x3d17: 0x0080, + 0x3d18: 0x00c0, 0x3d19: 0x00c0, 0x3d1a: 0x00c0, 0x3d1b: 0x00c0, 0x3d1c: 0x00c3, 0x3d1d: 0x00c3, + // Block 0xf5, offset 0x3d40 + 0x3d40: 0x00c0, 0x3d41: 0x00c0, 0x3d42: 0x00c0, 0x3d43: 0x00c0, 0x3d44: 0x00c0, 0x3d45: 0x00c0, + 0x3d46: 0x00c0, 0x3d47: 0x00c0, 0x3d48: 0x00c0, 0x3d49: 0x00c0, 0x3d4a: 0x00c0, 0x3d4b: 0x00c0, + 0x3d4c: 0x00c0, 0x3d4d: 0x00c0, 0x3d4e: 0x00c0, 0x3d4f: 0x00c0, 0x3d50: 0x00c0, 0x3d51: 0x00c0, + 0x3d52: 0x00c0, 0x3d53: 0x00c0, 0x3d54: 0x00c0, 0x3d55: 0x00c0, 0x3d56: 0x00c0, 0x3d57: 0x00c0, + 0x3d58: 0x00c0, 0x3d59: 0x00c0, 0x3d5a: 0x00c0, 0x3d5b: 0x00c0, 0x3d5c: 0x00c0, 0x3d5d: 0x00c0, + 0x3d5e: 0x00c0, 0x3d5f: 0x00c0, 0x3d60: 0x00c0, 0x3d61: 0x00c0, 0x3d62: 0x00c0, 0x3d63: 0x00c0, + 0x3d64: 0x00c0, 0x3d65: 0x00c0, 0x3d66: 0x00c0, 0x3d67: 0x00c0, 0x3d68: 0x00c0, 0x3d69: 0x00c0, + 0x3d6a: 0x00c0, 0x3d6b: 0x00c0, 0x3d6c: 0x00c0, 0x3d6d: 0x00c0, 0x3d6e: 0x00c0, 0x3d6f: 0x00c0, + 0x3d70: 0x00c0, 0x3d71: 0x00c0, 0x3d72: 0x00c0, 0x3d73: 0x00c3, 0x3d74: 0x00c3, 0x3d75: 0x00c3, + 0x3d76: 0x00c3, 0x3d77: 0x00c3, 0x3d78: 0x00c3, 0x3d79: 0x00c3, 0x3d7a: 0x00c3, 0x3d7b: 0x00c0, + 0x3d7c: 0x00c0, 0x3d7d: 0x00c3, 0x3d7e: 0x00c0, 0x3d7f: 0x00c6, + // Block 0xf6, offset 0x3d80 + 0x3d80: 0x00c3, 0x3d81: 0x0080, 0x3d82: 0x0080, 0x3d83: 0x0080, 0x3d84: 0x00c0, + 0x3d90: 0x00c0, 0x3d91: 0x00c0, + 0x3d92: 0x00c0, 0x3d93: 0x00c0, 0x3d94: 0x00c0, 0x3d95: 0x00c0, 0x3d96: 0x00c0, 0x3d97: 0x00c0, + 0x3d98: 0x00c0, 0x3d99: 0x00c0, + 0x3da0: 0x0080, 0x3da1: 0x0080, 0x3da2: 0x0080, 0x3da3: 0x0080, + 0x3da4: 0x0080, 0x3da5: 0x0080, 0x3da6: 0x0080, 0x3da7: 0x0080, 0x3da8: 0x0080, 0x3da9: 0x0080, + 0x3daa: 0x0080, 0x3dab: 0x0080, 0x3dac: 0x0080, + // Block 0xf7, offset 0x3dc0 + 0x3dc0: 0x00c0, 0x3dc1: 0x00c0, 0x3dc2: 0x00c0, 0x3dc3: 0x00c0, 0x3dc4: 0x00c0, 0x3dc5: 0x00c0, + 0x3dc6: 0x00c0, 0x3dc7: 0x00c0, 0x3dc8: 0x00c0, 0x3dc9: 0x00c0, 0x3dca: 0x00c0, 0x3dcb: 0x00c0, + 0x3dcc: 0x00c0, 0x3dcd: 0x00c0, 0x3dce: 0x00c0, 0x3dcf: 0x00c0, 0x3dd0: 0x00c0, 0x3dd1: 0x00c0, + 0x3dd2: 0x00c0, 0x3dd3: 0x00c0, 0x3dd4: 0x00c0, 0x3dd5: 0x00c0, 0x3dd6: 0x00c0, 0x3dd7: 0x00c0, + 0x3dd8: 0x00c0, 0x3dd9: 0x00c0, 0x3dda: 0x00c0, 
0x3ddb: 0x00c0, 0x3ddc: 0x00c0, 0x3ddd: 0x00c0, + 0x3dde: 0x00c0, 0x3ddf: 0x00c0, 0x3de0: 0x00c0, 0x3de1: 0x00c0, 0x3de2: 0x00c0, 0x3de3: 0x00c0, + 0x3de4: 0x00c0, 0x3de5: 0x00c0, 0x3de6: 0x00c0, 0x3de7: 0x00c0, 0x3de8: 0x00c0, 0x3de9: 0x00c0, + 0x3dea: 0x00c0, 0x3deb: 0x00c3, 0x3dec: 0x00c0, 0x3ded: 0x00c3, 0x3dee: 0x00c0, 0x3def: 0x00c0, + 0x3df0: 0x00c3, 0x3df1: 0x00c3, 0x3df2: 0x00c3, 0x3df3: 0x00c3, 0x3df4: 0x00c3, 0x3df5: 0x00c3, + 0x3df6: 0x00c5, 0x3df7: 0x00c3, + // Block 0xf8, offset 0x3e00 + 0x3e00: 0x00c0, 0x3e01: 0x00c0, 0x3e02: 0x00c0, 0x3e03: 0x00c0, 0x3e04: 0x00c0, 0x3e05: 0x00c0, + 0x3e06: 0x00c0, 0x3e07: 0x00c0, 0x3e08: 0x00c0, 0x3e09: 0x00c0, + // Block 0xf9, offset 0x3e40 + 0x3e40: 0x00c0, 0x3e41: 0x00c0, 0x3e42: 0x00c0, 0x3e43: 0x00c0, 0x3e44: 0x00c0, 0x3e45: 0x00c0, + 0x3e46: 0x00c0, 0x3e47: 0x00c0, 0x3e48: 0x00c0, 0x3e49: 0x00c0, 0x3e4a: 0x00c0, 0x3e4b: 0x00c0, + 0x3e4c: 0x00c0, 0x3e4d: 0x00c0, 0x3e4e: 0x00c0, 0x3e4f: 0x00c0, 0x3e50: 0x00c0, 0x3e51: 0x00c0, + 0x3e52: 0x00c0, 0x3e53: 0x00c0, 0x3e54: 0x00c0, 0x3e55: 0x00c0, 0x3e56: 0x00c0, 0x3e57: 0x00c0, + 0x3e58: 0x00c0, 0x3e59: 0x00c0, 0x3e5d: 0x00c3, + 0x3e5e: 0x00c3, 0x3e5f: 0x00c3, 0x3e60: 0x00c0, 0x3e61: 0x00c0, 0x3e62: 0x00c3, 0x3e63: 0x00c3, + 0x3e64: 0x00c3, 0x3e65: 0x00c3, 0x3e66: 0x00c0, 0x3e67: 0x00c3, 0x3e68: 0x00c3, 0x3e69: 0x00c3, + 0x3e6a: 0x00c3, 0x3e6b: 0x00c6, + 0x3e70: 0x00c0, 0x3e71: 0x00c0, 0x3e72: 0x00c0, 0x3e73: 0x00c0, 0x3e74: 0x00c0, 0x3e75: 0x00c0, + 0x3e76: 0x00c0, 0x3e77: 0x00c0, 0x3e78: 0x00c0, 0x3e79: 0x00c0, 0x3e7a: 0x0080, 0x3e7b: 0x0080, + 0x3e7c: 0x0080, 0x3e7d: 0x0080, 0x3e7e: 0x0080, 0x3e7f: 0x0080, + // Block 0xfa, offset 0x3e80 + 0x3ea0: 0x00c0, 0x3ea1: 0x00c0, 0x3ea2: 0x00c0, 0x3ea3: 0x00c0, + 0x3ea4: 0x00c0, 0x3ea5: 0x00c0, 0x3ea6: 0x00c0, 0x3ea7: 0x00c0, 0x3ea8: 0x00c0, 0x3ea9: 0x00c0, + 0x3eaa: 0x00c0, 0x3eab: 0x00c0, 0x3eac: 0x00c0, 0x3ead: 0x00c0, 0x3eae: 0x00c0, 0x3eaf: 0x00c0, + 0x3eb0: 0x00c0, 0x3eb1: 0x00c0, 0x3eb2: 0x00c0, 0x3eb3: 0x00c0, 0x3eb4: 0x00c0, 0x3eb5: 0x00c0, + 0x3eb6: 0x00c0, 0x3eb7: 0x00c0, 0x3eb8: 0x00c0, 0x3eb9: 0x00c0, 0x3eba: 0x00c0, 0x3ebb: 0x00c0, + 0x3ebc: 0x00c0, 0x3ebd: 0x00c0, 0x3ebe: 0x00c0, 0x3ebf: 0x00c0, + // Block 0xfb, offset 0x3ec0 + 0x3ec0: 0x00c0, 0x3ec1: 0x00c0, 0x3ec2: 0x00c0, 0x3ec3: 0x00c0, 0x3ec4: 0x00c0, 0x3ec5: 0x00c0, + 0x3ec6: 0x00c0, 0x3ec7: 0x00c0, 0x3ec8: 0x00c0, 0x3ec9: 0x00c0, 0x3eca: 0x00c0, 0x3ecb: 0x00c0, + 0x3ecc: 0x00c0, 0x3ecd: 0x00c0, 0x3ece: 0x00c0, 0x3ecf: 0x00c0, 0x3ed0: 0x00c0, 0x3ed1: 0x00c0, + 0x3ed2: 0x00c0, 0x3ed3: 0x00c0, 0x3ed4: 0x00c0, 0x3ed5: 0x00c0, 0x3ed6: 0x00c0, 0x3ed7: 0x00c0, + 0x3ed8: 0x00c0, 0x3ed9: 0x00c0, 0x3eda: 0x00c0, 0x3edb: 0x00c0, 0x3edc: 0x00c0, 0x3edd: 0x00c0, + 0x3ede: 0x00c0, 0x3edf: 0x00c0, 0x3ee0: 0x00c0, 0x3ee1: 0x00c0, 0x3ee2: 0x00c0, 0x3ee3: 0x00c0, + 0x3ee4: 0x00c0, 0x3ee5: 0x00c0, 0x3ee6: 0x00c0, 0x3ee7: 0x00c0, 0x3ee8: 0x00c0, 0x3ee9: 0x00c0, + 0x3eea: 0x0080, 0x3eeb: 0x0080, 0x3eec: 0x0080, 0x3eed: 0x0080, 0x3eee: 0x0080, 0x3eef: 0x0080, + 0x3ef0: 0x0080, 0x3ef1: 0x0080, 0x3ef2: 0x0080, + 0x3eff: 0x00c0, + // Block 0xfc, offset 0x3f00 + 0x3f00: 0x00c0, 0x3f01: 0x00c0, 0x3f02: 0x00c0, 0x3f03: 0x00c0, 0x3f04: 0x00c0, 0x3f05: 0x00c0, + 0x3f06: 0x00c0, 0x3f07: 0x00c0, 0x3f08: 0x00c0, 0x3f09: 0x00c0, 0x3f0a: 0x00c0, 0x3f0b: 0x00c0, + 0x3f0c: 0x00c0, 0x3f0d: 0x00c0, 0x3f0e: 0x00c0, 0x3f0f: 0x00c0, 0x3f10: 0x00c0, 0x3f11: 0x00c0, + 0x3f12: 0x00c0, 0x3f13: 0x00c0, 0x3f14: 0x00c0, 0x3f15: 0x00c0, 0x3f16: 0x00c0, 0x3f17: 0x00c0, + 0x3f18: 0x00c0, 0x3f19: 0x00c0, 0x3f1a: 0x00c0, 0x3f1b: 
0x00c0, 0x3f1c: 0x00c0, 0x3f1d: 0x00c0, + 0x3f1e: 0x00c0, 0x3f1f: 0x00c0, 0x3f20: 0x00c0, 0x3f21: 0x00c0, 0x3f22: 0x00c0, 0x3f23: 0x00c0, + 0x3f24: 0x00c0, 0x3f25: 0x00c0, 0x3f26: 0x00c0, 0x3f27: 0x00c0, 0x3f28: 0x00c0, 0x3f29: 0x00c0, + 0x3f2a: 0x00c0, 0x3f2b: 0x00c0, 0x3f2c: 0x00c0, 0x3f2d: 0x00c0, 0x3f2e: 0x00c0, 0x3f2f: 0x00c0, + 0x3f30: 0x00c0, 0x3f31: 0x00c0, 0x3f32: 0x00c0, 0x3f33: 0x00c0, 0x3f34: 0x00c0, 0x3f35: 0x00c0, + 0x3f36: 0x00c0, 0x3f37: 0x00c0, 0x3f38: 0x00c0, + // Block 0xfd, offset 0x3f40 + 0x3f40: 0x00c0, 0x3f41: 0x00c0, 0x3f42: 0x00c0, 0x3f43: 0x00c0, 0x3f44: 0x00c0, 0x3f45: 0x00c0, + 0x3f46: 0x00c0, 0x3f47: 0x00c0, 0x3f48: 0x00c0, 0x3f4a: 0x00c0, 0x3f4b: 0x00c0, + 0x3f4c: 0x00c0, 0x3f4d: 0x00c0, 0x3f4e: 0x00c0, 0x3f4f: 0x00c0, 0x3f50: 0x00c0, 0x3f51: 0x00c0, + 0x3f52: 0x00c0, 0x3f53: 0x00c0, 0x3f54: 0x00c0, 0x3f55: 0x00c0, 0x3f56: 0x00c0, 0x3f57: 0x00c0, + 0x3f58: 0x00c0, 0x3f59: 0x00c0, 0x3f5a: 0x00c0, 0x3f5b: 0x00c0, 0x3f5c: 0x00c0, 0x3f5d: 0x00c0, + 0x3f5e: 0x00c0, 0x3f5f: 0x00c0, 0x3f60: 0x00c0, 0x3f61: 0x00c0, 0x3f62: 0x00c0, 0x3f63: 0x00c0, + 0x3f64: 0x00c0, 0x3f65: 0x00c0, 0x3f66: 0x00c0, 0x3f67: 0x00c0, 0x3f68: 0x00c0, 0x3f69: 0x00c0, + 0x3f6a: 0x00c0, 0x3f6b: 0x00c0, 0x3f6c: 0x00c0, 0x3f6d: 0x00c0, 0x3f6e: 0x00c0, 0x3f6f: 0x00c0, + 0x3f70: 0x00c3, 0x3f71: 0x00c3, 0x3f72: 0x00c3, 0x3f73: 0x00c3, 0x3f74: 0x00c3, 0x3f75: 0x00c3, + 0x3f76: 0x00c3, 0x3f78: 0x00c3, 0x3f79: 0x00c3, 0x3f7a: 0x00c3, 0x3f7b: 0x00c3, + 0x3f7c: 0x00c3, 0x3f7d: 0x00c3, 0x3f7e: 0x00c0, 0x3f7f: 0x00c6, + // Block 0xfe, offset 0x3f80 + 0x3f80: 0x00c0, 0x3f81: 0x0080, 0x3f82: 0x0080, 0x3f83: 0x0080, 0x3f84: 0x0080, 0x3f85: 0x0080, + 0x3f90: 0x00c0, 0x3f91: 0x00c0, + 0x3f92: 0x00c0, 0x3f93: 0x00c0, 0x3f94: 0x00c0, 0x3f95: 0x00c0, 0x3f96: 0x00c0, 0x3f97: 0x00c0, + 0x3f98: 0x00c0, 0x3f99: 0x00c0, 0x3f9a: 0x0080, 0x3f9b: 0x0080, 0x3f9c: 0x0080, 0x3f9d: 0x0080, + 0x3f9e: 0x0080, 0x3f9f: 0x0080, 0x3fa0: 0x0080, 0x3fa1: 0x0080, 0x3fa2: 0x0080, 0x3fa3: 0x0080, + 0x3fa4: 0x0080, 0x3fa5: 0x0080, 0x3fa6: 0x0080, 0x3fa7: 0x0080, 0x3fa8: 0x0080, 0x3fa9: 0x0080, + 0x3faa: 0x0080, 0x3fab: 0x0080, 0x3fac: 0x0080, + 0x3fb0: 0x0080, 0x3fb1: 0x0080, 0x3fb2: 0x00c0, 0x3fb3: 0x00c0, 0x3fb4: 0x00c0, 0x3fb5: 0x00c0, + 0x3fb6: 0x00c0, 0x3fb7: 0x00c0, 0x3fb8: 0x00c0, 0x3fb9: 0x00c0, 0x3fba: 0x00c0, 0x3fbb: 0x00c0, + 0x3fbc: 0x00c0, 0x3fbd: 0x00c0, 0x3fbe: 0x00c0, 0x3fbf: 0x00c0, + // Block 0xff, offset 0x3fc0 + 0x3fc0: 0x00c0, 0x3fc1: 0x00c0, 0x3fc2: 0x00c0, 0x3fc3: 0x00c0, 0x3fc4: 0x00c0, 0x3fc5: 0x00c0, + 0x3fc6: 0x00c0, 0x3fc7: 0x00c0, 0x3fc8: 0x00c0, 0x3fc9: 0x00c0, 0x3fca: 0x00c0, 0x3fcb: 0x00c0, + 0x3fcc: 0x00c0, 0x3fcd: 0x00c0, 0x3fce: 0x00c0, 0x3fcf: 0x00c0, + 0x3fd2: 0x00c3, 0x3fd3: 0x00c3, 0x3fd4: 0x00c3, 0x3fd5: 0x00c3, 0x3fd6: 0x00c3, 0x3fd7: 0x00c3, + 0x3fd8: 0x00c3, 0x3fd9: 0x00c3, 0x3fda: 0x00c3, 0x3fdb: 0x00c3, 0x3fdc: 0x00c3, 0x3fdd: 0x00c3, + 0x3fde: 0x00c3, 0x3fdf: 0x00c3, 0x3fe0: 0x00c3, 0x3fe1: 0x00c3, 0x3fe2: 0x00c3, 0x3fe3: 0x00c3, + 0x3fe4: 0x00c3, 0x3fe5: 0x00c3, 0x3fe6: 0x00c3, 0x3fe7: 0x00c3, 0x3fe9: 0x00c0, + 0x3fea: 0x00c3, 0x3feb: 0x00c3, 0x3fec: 0x00c3, 0x3fed: 0x00c3, 0x3fee: 0x00c3, 0x3fef: 0x00c3, + 0x3ff0: 0x00c3, 0x3ff1: 0x00c0, 0x3ff2: 0x00c3, 0x3ff3: 0x00c3, 0x3ff4: 0x00c0, 0x3ff5: 0x00c3, + 0x3ff6: 0x00c3, + // Block 0x100, offset 0x4000 + 0x4000: 0x00c0, 0x4001: 0x00c0, 0x4002: 0x00c0, 0x4003: 0x00c0, 0x4004: 0x00c0, 0x4005: 0x00c0, + 0x4006: 0x00c0, 0x4007: 0x00c0, 0x4008: 0x00c0, 0x4009: 0x00c0, 0x400a: 0x00c0, 0x400b: 0x00c0, + 0x400c: 0x00c0, 0x400d: 0x00c0, 0x400e: 0x00c0, 
0x400f: 0x00c0, 0x4010: 0x00c0, 0x4011: 0x00c0, + 0x4012: 0x00c0, 0x4013: 0x00c0, 0x4014: 0x00c0, 0x4015: 0x00c0, 0x4016: 0x00c0, 0x4017: 0x00c0, + 0x4018: 0x00c0, 0x4019: 0x00c0, + // Block 0x101, offset 0x4040 + 0x4040: 0x0080, 0x4041: 0x0080, 0x4042: 0x0080, 0x4043: 0x0080, 0x4044: 0x0080, 0x4045: 0x0080, + 0x4046: 0x0080, 0x4047: 0x0080, 0x4048: 0x0080, 0x4049: 0x0080, 0x404a: 0x0080, 0x404b: 0x0080, + 0x404c: 0x0080, 0x404d: 0x0080, 0x404e: 0x0080, 0x404f: 0x0080, 0x4050: 0x0080, 0x4051: 0x0080, + 0x4052: 0x0080, 0x4053: 0x0080, 0x4054: 0x0080, 0x4055: 0x0080, 0x4056: 0x0080, 0x4057: 0x0080, + 0x4058: 0x0080, 0x4059: 0x0080, 0x405a: 0x0080, 0x405b: 0x0080, 0x405c: 0x0080, 0x405d: 0x0080, + 0x405e: 0x0080, 0x405f: 0x0080, 0x4060: 0x0080, 0x4061: 0x0080, 0x4062: 0x0080, 0x4063: 0x0080, + 0x4064: 0x0080, 0x4065: 0x0080, 0x4066: 0x0080, 0x4067: 0x0080, 0x4068: 0x0080, 0x4069: 0x0080, + 0x406a: 0x0080, 0x406b: 0x0080, 0x406c: 0x0080, 0x406d: 0x0080, 0x406e: 0x0080, + 0x4070: 0x0080, 0x4071: 0x0080, 0x4072: 0x0080, 0x4073: 0x0080, 0x4074: 0x0080, + // Block 0x102, offset 0x4080 + 0x4080: 0x00c0, 0x4081: 0x00c0, 0x4082: 0x00c0, 0x4083: 0x00c0, + // Block 0x103, offset 0x40c0 + 0x40c0: 0x00c0, 0x40c1: 0x00c0, 0x40c2: 0x00c0, 0x40c3: 0x00c0, 0x40c4: 0x00c0, 0x40c5: 0x00c0, + 0x40c6: 0x00c0, 0x40c7: 0x00c0, 0x40c8: 0x00c0, 0x40c9: 0x00c0, 0x40ca: 0x00c0, 0x40cb: 0x00c0, + 0x40cc: 0x00c0, 0x40cd: 0x00c0, 0x40ce: 0x00c0, 0x40cf: 0x00c0, 0x40d0: 0x00c0, 0x40d1: 0x00c0, + 0x40d2: 0x00c0, 0x40d3: 0x00c0, 0x40d4: 0x00c0, 0x40d5: 0x00c0, 0x40d6: 0x00c0, 0x40d7: 0x00c0, + 0x40d8: 0x00c0, 0x40d9: 0x00c0, 0x40da: 0x00c0, 0x40db: 0x00c0, 0x40dc: 0x00c0, 0x40dd: 0x00c0, + 0x40de: 0x00c0, 0x40df: 0x00c0, 0x40e0: 0x00c0, 0x40e1: 0x00c0, 0x40e2: 0x00c0, 0x40e3: 0x00c0, + 0x40e4: 0x00c0, 0x40e5: 0x00c0, 0x40e6: 0x00c0, 0x40e7: 0x00c0, 0x40e8: 0x00c0, 0x40e9: 0x00c0, + 0x40ea: 0x00c0, 0x40eb: 0x00c0, 0x40ec: 0x00c0, 0x40ed: 0x00c0, 0x40ee: 0x00c0, + // Block 0x104, offset 0x4100 + 0x4100: 0x00c0, 0x4101: 0x00c0, 0x4102: 0x00c0, 0x4103: 0x00c0, 0x4104: 0x00c0, 0x4105: 0x00c0, + 0x4106: 0x00c0, + // Block 0x105, offset 0x4140 + 0x4140: 0x00c0, 0x4141: 0x00c0, 0x4142: 0x00c0, 0x4143: 0x00c0, 0x4144: 0x00c0, 0x4145: 0x00c0, + 0x4146: 0x00c0, 0x4147: 0x00c0, 0x4148: 0x00c0, 0x4149: 0x00c0, 0x414a: 0x00c0, 0x414b: 0x00c0, + 0x414c: 0x00c0, 0x414d: 0x00c0, 0x414e: 0x00c0, 0x414f: 0x00c0, 0x4150: 0x00c0, 0x4151: 0x00c0, + 0x4152: 0x00c0, 0x4153: 0x00c0, 0x4154: 0x00c0, 0x4155: 0x00c0, 0x4156: 0x00c0, 0x4157: 0x00c0, + 0x4158: 0x00c0, 0x4159: 0x00c0, 0x415a: 0x00c0, 0x415b: 0x00c0, 0x415c: 0x00c0, 0x415d: 0x00c0, + 0x415e: 0x00c0, 0x4160: 0x00c0, 0x4161: 0x00c0, 0x4162: 0x00c0, 0x4163: 0x00c0, + 0x4164: 0x00c0, 0x4165: 0x00c0, 0x4166: 0x00c0, 0x4167: 0x00c0, 0x4168: 0x00c0, 0x4169: 0x00c0, + 0x416e: 0x0080, 0x416f: 0x0080, + // Block 0x106, offset 0x4180 + 0x4190: 0x00c0, 0x4191: 0x00c0, + 0x4192: 0x00c0, 0x4193: 0x00c0, 0x4194: 0x00c0, 0x4195: 0x00c0, 0x4196: 0x00c0, 0x4197: 0x00c0, + 0x4198: 0x00c0, 0x4199: 0x00c0, 0x419a: 0x00c0, 0x419b: 0x00c0, 0x419c: 0x00c0, 0x419d: 0x00c0, + 0x419e: 0x00c0, 0x419f: 0x00c0, 0x41a0: 0x00c0, 0x41a1: 0x00c0, 0x41a2: 0x00c0, 0x41a3: 0x00c0, + 0x41a4: 0x00c0, 0x41a5: 0x00c0, 0x41a6: 0x00c0, 0x41a7: 0x00c0, 0x41a8: 0x00c0, 0x41a9: 0x00c0, + 0x41aa: 0x00c0, 0x41ab: 0x00c0, 0x41ac: 0x00c0, 0x41ad: 0x00c0, + 0x41b0: 0x00c3, 0x41b1: 0x00c3, 0x41b2: 0x00c3, 0x41b3: 0x00c3, 0x41b4: 0x00c3, 0x41b5: 0x0080, + // Block 0x107, offset 0x41c0 + 0x41c0: 0x00c0, 0x41c1: 0x00c0, 0x41c2: 0x00c0, 
0x41c3: 0x00c0, 0x41c4: 0x00c0, 0x41c5: 0x00c0, + 0x41c6: 0x00c0, 0x41c7: 0x00c0, 0x41c8: 0x00c0, 0x41c9: 0x00c0, 0x41ca: 0x00c0, 0x41cb: 0x00c0, + 0x41cc: 0x00c0, 0x41cd: 0x00c0, 0x41ce: 0x00c0, 0x41cf: 0x00c0, 0x41d0: 0x00c0, 0x41d1: 0x00c0, + 0x41d2: 0x00c0, 0x41d3: 0x00c0, 0x41d4: 0x00c0, 0x41d5: 0x00c0, 0x41d6: 0x00c0, 0x41d7: 0x00c0, + 0x41d8: 0x00c0, 0x41d9: 0x00c0, 0x41da: 0x00c0, 0x41db: 0x00c0, 0x41dc: 0x00c0, 0x41dd: 0x00c0, + 0x41de: 0x00c0, 0x41df: 0x00c0, 0x41e0: 0x00c0, 0x41e1: 0x00c0, 0x41e2: 0x00c0, 0x41e3: 0x00c0, + 0x41e4: 0x00c0, 0x41e5: 0x00c0, 0x41e6: 0x00c0, 0x41e7: 0x00c0, 0x41e8: 0x00c0, 0x41e9: 0x00c0, + 0x41ea: 0x00c0, 0x41eb: 0x00c0, 0x41ec: 0x00c0, 0x41ed: 0x00c0, 0x41ee: 0x00c0, 0x41ef: 0x00c0, + 0x41f0: 0x00c3, 0x41f1: 0x00c3, 0x41f2: 0x00c3, 0x41f3: 0x00c3, 0x41f4: 0x00c3, 0x41f5: 0x00c3, + 0x41f6: 0x00c3, 0x41f7: 0x0080, 0x41f8: 0x0080, 0x41f9: 0x0080, 0x41fa: 0x0080, 0x41fb: 0x0080, + 0x41fc: 0x0080, 0x41fd: 0x0080, 0x41fe: 0x0080, 0x41ff: 0x0080, + // Block 0x108, offset 0x4200 + 0x4200: 0x00c0, 0x4201: 0x00c0, 0x4202: 0x00c0, 0x4203: 0x00c0, 0x4204: 0x0080, 0x4205: 0x0080, + 0x4210: 0x00c0, 0x4211: 0x00c0, + 0x4212: 0x00c0, 0x4213: 0x00c0, 0x4214: 0x00c0, 0x4215: 0x00c0, 0x4216: 0x00c0, 0x4217: 0x00c0, + 0x4218: 0x00c0, 0x4219: 0x00c0, 0x421b: 0x0080, 0x421c: 0x0080, 0x421d: 0x0080, + 0x421e: 0x0080, 0x421f: 0x0080, 0x4220: 0x0080, 0x4221: 0x0080, 0x4223: 0x00c0, + 0x4224: 0x00c0, 0x4225: 0x00c0, 0x4226: 0x00c0, 0x4227: 0x00c0, 0x4228: 0x00c0, 0x4229: 0x00c0, + 0x422a: 0x00c0, 0x422b: 0x00c0, 0x422c: 0x00c0, 0x422d: 0x00c0, 0x422e: 0x00c0, 0x422f: 0x00c0, + 0x4230: 0x00c0, 0x4231: 0x00c0, 0x4232: 0x00c0, 0x4233: 0x00c0, 0x4234: 0x00c0, 0x4235: 0x00c0, + 0x4236: 0x00c0, 0x4237: 0x00c0, + 0x423d: 0x00c0, 0x423e: 0x00c0, 0x423f: 0x00c0, + // Block 0x109, offset 0x4240 + 0x4240: 0x00c0, 0x4241: 0x00c0, 0x4242: 0x00c0, 0x4243: 0x00c0, 0x4244: 0x00c0, 0x4245: 0x00c0, + 0x4246: 0x00c0, 0x4247: 0x00c0, 0x4248: 0x00c0, 0x4249: 0x00c0, 0x424a: 0x00c0, 0x424b: 0x00c0, + 0x424c: 0x00c0, 0x424d: 0x00c0, 0x424e: 0x00c0, 0x424f: 0x00c0, + // Block 0x10a, offset 0x4280 + 0x4280: 0x00c0, 0x4281: 0x00c0, 0x4282: 0x00c0, 0x4283: 0x00c0, 0x4284: 0x00c0, + 0x4290: 0x00c0, 0x4291: 0x00c0, + 0x4292: 0x00c0, 0x4293: 0x00c0, 0x4294: 0x00c0, 0x4295: 0x00c0, 0x4296: 0x00c0, 0x4297: 0x00c0, + 0x4298: 0x00c0, 0x4299: 0x00c0, 0x429a: 0x00c0, 0x429b: 0x00c0, 0x429c: 0x00c0, 0x429d: 0x00c0, + 0x429e: 0x00c0, 0x429f: 0x00c0, 0x42a0: 0x00c0, 0x42a1: 0x00c0, 0x42a2: 0x00c0, 0x42a3: 0x00c0, + 0x42a4: 0x00c0, 0x42a5: 0x00c0, 0x42a6: 0x00c0, 0x42a7: 0x00c0, 0x42a8: 0x00c0, 0x42a9: 0x00c0, + 0x42aa: 0x00c0, 0x42ab: 0x00c0, 0x42ac: 0x00c0, 0x42ad: 0x00c0, 0x42ae: 0x00c0, 0x42af: 0x00c0, + 0x42b0: 0x00c0, 0x42b1: 0x00c0, 0x42b2: 0x00c0, 0x42b3: 0x00c0, 0x42b4: 0x00c0, 0x42b5: 0x00c0, + 0x42b6: 0x00c0, 0x42b7: 0x00c0, 0x42b8: 0x00c0, 0x42b9: 0x00c0, 0x42ba: 0x00c0, 0x42bb: 0x00c0, + 0x42bc: 0x00c0, 0x42bd: 0x00c0, 0x42be: 0x00c0, + // Block 0x10b, offset 0x42c0 + 0x42cf: 0x00c3, 0x42d0: 0x00c3, 0x42d1: 0x00c3, + 0x42d2: 0x00c3, 0x42d3: 0x00c0, 0x42d4: 0x00c0, 0x42d5: 0x00c0, 0x42d6: 0x00c0, 0x42d7: 0x00c0, + 0x42d8: 0x00c0, 0x42d9: 0x00c0, 0x42da: 0x00c0, 0x42db: 0x00c0, 0x42dc: 0x00c0, 0x42dd: 0x00c0, + 0x42de: 0x00c0, 0x42df: 0x00c0, + // Block 0x10c, offset 0x4300 + 0x4320: 0x00c0, + // Block 0x10d, offset 0x4340 + 0x4340: 0x00c0, 0x4341: 0x00c0, 0x4342: 0x00c0, 0x4343: 0x00c0, 0x4344: 0x00c0, 0x4345: 0x00c0, + 0x4346: 0x00c0, 0x4347: 0x00c0, 0x4348: 0x00c0, 0x4349: 0x00c0, 0x434a: 0x00c0, 
0x434b: 0x00c0, + 0x434c: 0x00c0, 0x434d: 0x00c0, 0x434e: 0x00c0, 0x434f: 0x00c0, 0x4350: 0x00c0, 0x4351: 0x00c0, + 0x4352: 0x00c0, 0x4353: 0x00c0, 0x4354: 0x00c0, 0x4355: 0x00c0, 0x4356: 0x00c0, 0x4357: 0x00c0, + 0x4358: 0x00c0, 0x4359: 0x00c0, 0x435a: 0x00c0, 0x435b: 0x00c0, 0x435c: 0x00c0, 0x435d: 0x00c0, + 0x435e: 0x00c0, 0x435f: 0x00c0, 0x4360: 0x00c0, 0x4361: 0x00c0, 0x4362: 0x00c0, 0x4363: 0x00c0, + 0x4364: 0x00c0, 0x4365: 0x00c0, 0x4366: 0x00c0, 0x4367: 0x00c0, 0x4368: 0x00c0, 0x4369: 0x00c0, + 0x436a: 0x00c0, 0x436b: 0x00c0, 0x436c: 0x00c0, + // Block 0x10e, offset 0x4380 + 0x4380: 0x00cc, 0x4381: 0x00cc, + // Block 0x10f, offset 0x43c0 + 0x43c0: 0x00c0, 0x43c1: 0x00c0, 0x43c2: 0x00c0, 0x43c3: 0x00c0, 0x43c4: 0x00c0, 0x43c5: 0x00c0, + 0x43c6: 0x00c0, 0x43c7: 0x00c0, 0x43c8: 0x00c0, 0x43c9: 0x00c0, 0x43ca: 0x00c0, 0x43cb: 0x00c0, + 0x43cc: 0x00c0, 0x43cd: 0x00c0, 0x43ce: 0x00c0, 0x43cf: 0x00c0, 0x43d0: 0x00c0, 0x43d1: 0x00c0, + 0x43d2: 0x00c0, 0x43d3: 0x00c0, 0x43d4: 0x00c0, 0x43d5: 0x00c0, 0x43d6: 0x00c0, 0x43d7: 0x00c0, + 0x43d8: 0x00c0, 0x43d9: 0x00c0, 0x43da: 0x00c0, 0x43db: 0x00c0, 0x43dc: 0x00c0, 0x43dd: 0x00c0, + 0x43de: 0x00c0, 0x43df: 0x00c0, 0x43e0: 0x00c0, 0x43e1: 0x00c0, 0x43e2: 0x00c0, 0x43e3: 0x00c0, + 0x43e4: 0x00c0, 0x43e5: 0x00c0, 0x43e6: 0x00c0, 0x43e7: 0x00c0, 0x43e8: 0x00c0, 0x43e9: 0x00c0, + 0x43ea: 0x00c0, + 0x43f0: 0x00c0, 0x43f1: 0x00c0, 0x43f2: 0x00c0, 0x43f3: 0x00c0, 0x43f4: 0x00c0, 0x43f5: 0x00c0, + 0x43f6: 0x00c0, 0x43f7: 0x00c0, 0x43f8: 0x00c0, 0x43f9: 0x00c0, 0x43fa: 0x00c0, 0x43fb: 0x00c0, + 0x43fc: 0x00c0, + // Block 0x110, offset 0x4400 + 0x4400: 0x00c0, 0x4401: 0x00c0, 0x4402: 0x00c0, 0x4403: 0x00c0, 0x4404: 0x00c0, 0x4405: 0x00c0, + 0x4406: 0x00c0, 0x4407: 0x00c0, 0x4408: 0x00c0, + 0x4410: 0x00c0, 0x4411: 0x00c0, + 0x4412: 0x00c0, 0x4413: 0x00c0, 0x4414: 0x00c0, 0x4415: 0x00c0, 0x4416: 0x00c0, 0x4417: 0x00c0, + 0x4418: 0x00c0, 0x4419: 0x00c0, 0x441c: 0x0080, 0x441d: 0x00c3, + 0x441e: 0x00c3, 0x441f: 0x0080, 0x4420: 0x0040, 0x4421: 0x0040, 0x4422: 0x0040, 0x4423: 0x0040, + // Block 0x111, offset 0x4440 + 0x4440: 0x0080, 0x4441: 0x0080, 0x4442: 0x0080, 0x4443: 0x0080, 0x4444: 0x0080, 0x4445: 0x0080, + 0x4446: 0x0080, 0x4447: 0x0080, 0x4448: 0x0080, 0x4449: 0x0080, 0x444a: 0x0080, 0x444b: 0x0080, + 0x444c: 0x0080, 0x444d: 0x0080, 0x444e: 0x0080, 0x444f: 0x0080, 0x4450: 0x0080, 0x4451: 0x0080, + 0x4452: 0x0080, 0x4453: 0x0080, 0x4454: 0x0080, 0x4455: 0x0080, 0x4456: 0x0080, 0x4457: 0x0080, + 0x4458: 0x0080, 0x4459: 0x0080, 0x445a: 0x0080, 0x445b: 0x0080, 0x445c: 0x0080, 0x445d: 0x0080, + 0x445e: 0x0080, 0x445f: 0x0080, 0x4460: 0x0080, 0x4461: 0x0080, 0x4462: 0x0080, 0x4463: 0x0080, + 0x4464: 0x0080, 0x4465: 0x0080, 0x4466: 0x0080, 0x4467: 0x0080, 0x4468: 0x0080, 0x4469: 0x0080, + 0x446a: 0x0080, 0x446b: 0x0080, 0x446c: 0x0080, 0x446d: 0x0080, 0x446e: 0x0080, 0x446f: 0x0080, + 0x4470: 0x0080, 0x4471: 0x0080, 0x4472: 0x0080, 0x4473: 0x0080, 0x4474: 0x0080, 0x4475: 0x0080, + // Block 0x112, offset 0x4480 + 0x4480: 0x0080, 0x4481: 0x0080, 0x4482: 0x0080, 0x4483: 0x0080, 0x4484: 0x0080, 0x4485: 0x0080, + 0x4486: 0x0080, 0x4487: 0x0080, 0x4488: 0x0080, 0x4489: 0x0080, 0x448a: 0x0080, 0x448b: 0x0080, + 0x448c: 0x0080, 0x448d: 0x0080, 0x448e: 0x0080, 0x448f: 0x0080, 0x4490: 0x0080, 0x4491: 0x0080, + 0x4492: 0x0080, 0x4493: 0x0080, 0x4494: 0x0080, 0x4495: 0x0080, 0x4496: 0x0080, 0x4497: 0x0080, + 0x4498: 0x0080, 0x4499: 0x0080, 0x449a: 0x0080, 0x449b: 0x0080, 0x449c: 0x0080, 0x449d: 0x0080, + 0x449e: 0x0080, 0x449f: 0x0080, 0x44a0: 0x0080, 0x44a1: 0x0080, 
0x44a2: 0x0080, 0x44a3: 0x0080, + 0x44a4: 0x0080, 0x44a5: 0x0080, 0x44a6: 0x0080, 0x44a9: 0x0080, + 0x44aa: 0x0080, 0x44ab: 0x0080, 0x44ac: 0x0080, 0x44ad: 0x0080, 0x44ae: 0x0080, 0x44af: 0x0080, + 0x44b0: 0x0080, 0x44b1: 0x0080, 0x44b2: 0x0080, 0x44b3: 0x0080, 0x44b4: 0x0080, 0x44b5: 0x0080, + 0x44b6: 0x0080, 0x44b7: 0x0080, 0x44b8: 0x0080, 0x44b9: 0x0080, 0x44ba: 0x0080, 0x44bb: 0x0080, + 0x44bc: 0x0080, 0x44bd: 0x0080, 0x44be: 0x0080, 0x44bf: 0x0080, + // Block 0x113, offset 0x44c0 + 0x44c0: 0x0080, 0x44c1: 0x0080, 0x44c2: 0x0080, 0x44c3: 0x0080, 0x44c4: 0x0080, 0x44c5: 0x0080, + 0x44c6: 0x0080, 0x44c7: 0x0080, 0x44c8: 0x0080, 0x44c9: 0x0080, 0x44ca: 0x0080, 0x44cb: 0x0080, + 0x44cc: 0x0080, 0x44cd: 0x0080, 0x44ce: 0x0080, 0x44cf: 0x0080, 0x44d0: 0x0080, 0x44d1: 0x0080, + 0x44d2: 0x0080, 0x44d3: 0x0080, 0x44d4: 0x0080, 0x44d5: 0x0080, 0x44d6: 0x0080, 0x44d7: 0x0080, + 0x44d8: 0x0080, 0x44d9: 0x0080, 0x44da: 0x0080, 0x44db: 0x0080, 0x44dc: 0x0080, 0x44dd: 0x0080, + 0x44de: 0x0080, 0x44df: 0x0080, 0x44e0: 0x0080, 0x44e1: 0x0080, 0x44e2: 0x0080, 0x44e3: 0x0080, + 0x44e4: 0x0080, 0x44e5: 0x00c0, 0x44e6: 0x00c0, 0x44e7: 0x00c3, 0x44e8: 0x00c3, 0x44e9: 0x00c3, + 0x44ea: 0x0080, 0x44eb: 0x0080, 0x44ec: 0x0080, 0x44ed: 0x00c0, 0x44ee: 0x00c0, 0x44ef: 0x00c0, + 0x44f0: 0x00c0, 0x44f1: 0x00c0, 0x44f2: 0x00c0, 0x44f3: 0x0040, 0x44f4: 0x0040, 0x44f5: 0x0040, + 0x44f6: 0x0040, 0x44f7: 0x0040, 0x44f8: 0x0040, 0x44f9: 0x0040, 0x44fa: 0x0040, 0x44fb: 0x00c3, + 0x44fc: 0x00c3, 0x44fd: 0x00c3, 0x44fe: 0x00c3, 0x44ff: 0x00c3, + // Block 0x114, offset 0x4500 + 0x4500: 0x00c3, 0x4501: 0x00c3, 0x4502: 0x00c3, 0x4503: 0x0080, 0x4504: 0x0080, 0x4505: 0x00c3, + 0x4506: 0x00c3, 0x4507: 0x00c3, 0x4508: 0x00c3, 0x4509: 0x00c3, 0x450a: 0x00c3, 0x450b: 0x00c3, + 0x450c: 0x0080, 0x450d: 0x0080, 0x450e: 0x0080, 0x450f: 0x0080, 0x4510: 0x0080, 0x4511: 0x0080, + 0x4512: 0x0080, 0x4513: 0x0080, 0x4514: 0x0080, 0x4515: 0x0080, 0x4516: 0x0080, 0x4517: 0x0080, + 0x4518: 0x0080, 0x4519: 0x0080, 0x451a: 0x0080, 0x451b: 0x0080, 0x451c: 0x0080, 0x451d: 0x0080, + 0x451e: 0x0080, 0x451f: 0x0080, 0x4520: 0x0080, 0x4521: 0x0080, 0x4522: 0x0080, 0x4523: 0x0080, + 0x4524: 0x0080, 0x4525: 0x0080, 0x4526: 0x0080, 0x4527: 0x0080, 0x4528: 0x0080, 0x4529: 0x0080, + 0x452a: 0x00c3, 0x452b: 0x00c3, 0x452c: 0x00c3, 0x452d: 0x00c3, 0x452e: 0x0080, 0x452f: 0x0080, + 0x4530: 0x0080, 0x4531: 0x0080, 0x4532: 0x0080, 0x4533: 0x0080, 0x4534: 0x0080, 0x4535: 0x0080, + 0x4536: 0x0080, 0x4537: 0x0080, 0x4538: 0x0080, 0x4539: 0x0080, 0x453a: 0x0080, 0x453b: 0x0080, + 0x453c: 0x0080, 0x453d: 0x0080, 0x453e: 0x0080, 0x453f: 0x0080, + // Block 0x115, offset 0x4540 + 0x4540: 0x0080, 0x4541: 0x0080, 0x4542: 0x0080, 0x4543: 0x0080, 0x4544: 0x0080, 0x4545: 0x0080, + 0x4546: 0x0080, 0x4547: 0x0080, 0x4548: 0x0080, 0x4549: 0x0080, 0x454a: 0x0080, 0x454b: 0x0080, + 0x454c: 0x0080, 0x454d: 0x0080, 0x454e: 0x0080, 0x454f: 0x0080, 0x4550: 0x0080, 0x4551: 0x0080, + 0x4552: 0x0080, 0x4553: 0x0080, 0x4554: 0x0080, 0x4555: 0x0080, 0x4556: 0x0080, 0x4557: 0x0080, + 0x4558: 0x0080, 0x4559: 0x0080, 0x455a: 0x0080, 0x455b: 0x0080, 0x455c: 0x0080, 0x455d: 0x0080, + 0x455e: 0x0080, 0x455f: 0x0080, 0x4560: 0x0080, 0x4561: 0x0080, 0x4562: 0x0080, 0x4563: 0x0080, + 0x4564: 0x0080, 0x4565: 0x0080, 0x4566: 0x0080, 0x4567: 0x0080, 0x4568: 0x0080, + // Block 0x116, offset 0x4580 + 0x4580: 0x0088, 0x4581: 0x0088, 0x4582: 0x00c9, 0x4583: 0x00c9, 0x4584: 0x00c9, 0x4585: 0x0088, + // Block 0x117, offset 0x45c0 + 0x45c0: 0x0080, 0x45c1: 0x0080, 0x45c2: 0x0080, 0x45c3: 0x0080, 0x45c4: 
0x0080, 0x45c5: 0x0080, + 0x45c6: 0x0080, 0x45c7: 0x0080, 0x45c8: 0x0080, 0x45c9: 0x0080, 0x45ca: 0x0080, 0x45cb: 0x0080, + 0x45cc: 0x0080, 0x45cd: 0x0080, 0x45ce: 0x0080, 0x45cf: 0x0080, 0x45d0: 0x0080, 0x45d1: 0x0080, + 0x45d2: 0x0080, 0x45d3: 0x0080, 0x45d4: 0x0080, 0x45d5: 0x0080, 0x45d6: 0x0080, + 0x45e0: 0x0080, 0x45e1: 0x0080, 0x45e2: 0x0080, 0x45e3: 0x0080, + 0x45e4: 0x0080, 0x45e5: 0x0080, 0x45e6: 0x0080, 0x45e7: 0x0080, 0x45e8: 0x0080, 0x45e9: 0x0080, + 0x45ea: 0x0080, 0x45eb: 0x0080, 0x45ec: 0x0080, 0x45ed: 0x0080, 0x45ee: 0x0080, 0x45ef: 0x0080, + 0x45f0: 0x0080, 0x45f1: 0x0080, + // Block 0x118, offset 0x4600 + 0x4600: 0x0080, 0x4601: 0x0080, 0x4602: 0x0080, 0x4603: 0x0080, 0x4604: 0x0080, 0x4605: 0x0080, + 0x4606: 0x0080, 0x4607: 0x0080, 0x4608: 0x0080, 0x4609: 0x0080, 0x460a: 0x0080, 0x460b: 0x0080, + 0x460c: 0x0080, 0x460d: 0x0080, 0x460e: 0x0080, 0x460f: 0x0080, 0x4610: 0x0080, 0x4611: 0x0080, + 0x4612: 0x0080, 0x4613: 0x0080, 0x4614: 0x0080, 0x4616: 0x0080, 0x4617: 0x0080, + 0x4618: 0x0080, 0x4619: 0x0080, 0x461a: 0x0080, 0x461b: 0x0080, 0x461c: 0x0080, 0x461d: 0x0080, + 0x461e: 0x0080, 0x461f: 0x0080, 0x4620: 0x0080, 0x4621: 0x0080, 0x4622: 0x0080, 0x4623: 0x0080, + 0x4624: 0x0080, 0x4625: 0x0080, 0x4626: 0x0080, 0x4627: 0x0080, 0x4628: 0x0080, 0x4629: 0x0080, + 0x462a: 0x0080, 0x462b: 0x0080, 0x462c: 0x0080, 0x462d: 0x0080, 0x462e: 0x0080, 0x462f: 0x0080, + 0x4630: 0x0080, 0x4631: 0x0080, 0x4632: 0x0080, 0x4633: 0x0080, 0x4634: 0x0080, 0x4635: 0x0080, + 0x4636: 0x0080, 0x4637: 0x0080, 0x4638: 0x0080, 0x4639: 0x0080, 0x463a: 0x0080, 0x463b: 0x0080, + 0x463c: 0x0080, 0x463d: 0x0080, 0x463e: 0x0080, 0x463f: 0x0080, + // Block 0x119, offset 0x4640 + 0x4640: 0x0080, 0x4641: 0x0080, 0x4642: 0x0080, 0x4643: 0x0080, 0x4644: 0x0080, 0x4645: 0x0080, + 0x4646: 0x0080, 0x4647: 0x0080, 0x4648: 0x0080, 0x4649: 0x0080, 0x464a: 0x0080, 0x464b: 0x0080, + 0x464c: 0x0080, 0x464d: 0x0080, 0x464e: 0x0080, 0x464f: 0x0080, 0x4650: 0x0080, 0x4651: 0x0080, + 0x4652: 0x0080, 0x4653: 0x0080, 0x4654: 0x0080, 0x4655: 0x0080, 0x4656: 0x0080, 0x4657: 0x0080, + 0x4658: 0x0080, 0x4659: 0x0080, 0x465a: 0x0080, 0x465b: 0x0080, 0x465c: 0x0080, + 0x465e: 0x0080, 0x465f: 0x0080, 0x4662: 0x0080, + 0x4665: 0x0080, 0x4666: 0x0080, 0x4669: 0x0080, + 0x466a: 0x0080, 0x466b: 0x0080, 0x466c: 0x0080, 0x466e: 0x0080, 0x466f: 0x0080, + 0x4670: 0x0080, 0x4671: 0x0080, 0x4672: 0x0080, 0x4673: 0x0080, 0x4674: 0x0080, 0x4675: 0x0080, + 0x4676: 0x0080, 0x4677: 0x0080, 0x4678: 0x0080, 0x4679: 0x0080, 0x467b: 0x0080, + 0x467d: 0x0080, 0x467e: 0x0080, 0x467f: 0x0080, + // Block 0x11a, offset 0x4680 + 0x4680: 0x0080, 0x4681: 0x0080, 0x4682: 0x0080, 0x4683: 0x0080, 0x4685: 0x0080, + 0x4686: 0x0080, 0x4687: 0x0080, 0x4688: 0x0080, 0x4689: 0x0080, 0x468a: 0x0080, 0x468b: 0x0080, + 0x468c: 0x0080, 0x468d: 0x0080, 0x468e: 0x0080, 0x468f: 0x0080, 0x4690: 0x0080, 0x4691: 0x0080, + 0x4692: 0x0080, 0x4693: 0x0080, 0x4694: 0x0080, 0x4695: 0x0080, 0x4696: 0x0080, 0x4697: 0x0080, + 0x4698: 0x0080, 0x4699: 0x0080, 0x469a: 0x0080, 0x469b: 0x0080, 0x469c: 0x0080, 0x469d: 0x0080, + 0x469e: 0x0080, 0x469f: 0x0080, 0x46a0: 0x0080, 0x46a1: 0x0080, 0x46a2: 0x0080, 0x46a3: 0x0080, + 0x46a4: 0x0080, 0x46a5: 0x0080, 0x46a6: 0x0080, 0x46a7: 0x0080, 0x46a8: 0x0080, 0x46a9: 0x0080, + 0x46aa: 0x0080, 0x46ab: 0x0080, 0x46ac: 0x0080, 0x46ad: 0x0080, 0x46ae: 0x0080, 0x46af: 0x0080, + 0x46b0: 0x0080, 0x46b1: 0x0080, 0x46b2: 0x0080, 0x46b3: 0x0080, 0x46b4: 0x0080, 0x46b5: 0x0080, + 0x46b6: 0x0080, 0x46b7: 0x0080, 0x46b8: 0x0080, 0x46b9: 0x0080, 0x46ba: 
0x0080, 0x46bb: 0x0080, + 0x46bc: 0x0080, 0x46bd: 0x0080, 0x46be: 0x0080, 0x46bf: 0x0080, + // Block 0x11b, offset 0x46c0 + 0x46c0: 0x0080, 0x46c1: 0x0080, 0x46c2: 0x0080, 0x46c3: 0x0080, 0x46c4: 0x0080, 0x46c5: 0x0080, + 0x46c7: 0x0080, 0x46c8: 0x0080, 0x46c9: 0x0080, 0x46ca: 0x0080, + 0x46cd: 0x0080, 0x46ce: 0x0080, 0x46cf: 0x0080, 0x46d0: 0x0080, 0x46d1: 0x0080, + 0x46d2: 0x0080, 0x46d3: 0x0080, 0x46d4: 0x0080, 0x46d6: 0x0080, 0x46d7: 0x0080, + 0x46d8: 0x0080, 0x46d9: 0x0080, 0x46da: 0x0080, 0x46db: 0x0080, 0x46dc: 0x0080, + 0x46de: 0x0080, 0x46df: 0x0080, 0x46e0: 0x0080, 0x46e1: 0x0080, 0x46e2: 0x0080, 0x46e3: 0x0080, + 0x46e4: 0x0080, 0x46e5: 0x0080, 0x46e6: 0x0080, 0x46e7: 0x0080, 0x46e8: 0x0080, 0x46e9: 0x0080, + 0x46ea: 0x0080, 0x46eb: 0x0080, 0x46ec: 0x0080, 0x46ed: 0x0080, 0x46ee: 0x0080, 0x46ef: 0x0080, + 0x46f0: 0x0080, 0x46f1: 0x0080, 0x46f2: 0x0080, 0x46f3: 0x0080, 0x46f4: 0x0080, 0x46f5: 0x0080, + 0x46f6: 0x0080, 0x46f7: 0x0080, 0x46f8: 0x0080, 0x46f9: 0x0080, 0x46fb: 0x0080, + 0x46fc: 0x0080, 0x46fd: 0x0080, 0x46fe: 0x0080, + // Block 0x11c, offset 0x4700 + 0x4700: 0x0080, 0x4701: 0x0080, 0x4702: 0x0080, 0x4703: 0x0080, 0x4704: 0x0080, + 0x4706: 0x0080, 0x470a: 0x0080, 0x470b: 0x0080, + 0x470c: 0x0080, 0x470d: 0x0080, 0x470e: 0x0080, 0x470f: 0x0080, 0x4710: 0x0080, + 0x4712: 0x0080, 0x4713: 0x0080, 0x4714: 0x0080, 0x4715: 0x0080, 0x4716: 0x0080, 0x4717: 0x0080, + 0x4718: 0x0080, 0x4719: 0x0080, 0x471a: 0x0080, 0x471b: 0x0080, 0x471c: 0x0080, 0x471d: 0x0080, + 0x471e: 0x0080, 0x471f: 0x0080, 0x4720: 0x0080, 0x4721: 0x0080, 0x4722: 0x0080, 0x4723: 0x0080, + 0x4724: 0x0080, 0x4725: 0x0080, 0x4726: 0x0080, 0x4727: 0x0080, 0x4728: 0x0080, 0x4729: 0x0080, + 0x472a: 0x0080, 0x472b: 0x0080, 0x472c: 0x0080, 0x472d: 0x0080, 0x472e: 0x0080, 0x472f: 0x0080, + 0x4730: 0x0080, 0x4731: 0x0080, 0x4732: 0x0080, 0x4733: 0x0080, 0x4734: 0x0080, 0x4735: 0x0080, + 0x4736: 0x0080, 0x4737: 0x0080, 0x4738: 0x0080, 0x4739: 0x0080, 0x473a: 0x0080, 0x473b: 0x0080, + 0x473c: 0x0080, 0x473d: 0x0080, 0x473e: 0x0080, 0x473f: 0x0080, + // Block 0x11d, offset 0x4740 + 0x4740: 0x0080, 0x4741: 0x0080, 0x4742: 0x0080, 0x4743: 0x0080, 0x4744: 0x0080, 0x4745: 0x0080, + 0x4746: 0x0080, 0x4747: 0x0080, 0x4748: 0x0080, 0x4749: 0x0080, 0x474a: 0x0080, 0x474b: 0x0080, + 0x474c: 0x0080, 0x474d: 0x0080, 0x474e: 0x0080, 0x474f: 0x0080, 0x4750: 0x0080, 0x4751: 0x0080, + 0x4752: 0x0080, 0x4753: 0x0080, 0x4754: 0x0080, 0x4755: 0x0080, 0x4756: 0x0080, 0x4757: 0x0080, + 0x4758: 0x0080, 0x4759: 0x0080, 0x475a: 0x0080, 0x475b: 0x0080, 0x475c: 0x0080, 0x475d: 0x0080, + 0x475e: 0x0080, 0x475f: 0x0080, 0x4760: 0x0080, 0x4761: 0x0080, 0x4762: 0x0080, 0x4763: 0x0080, + 0x4764: 0x0080, 0x4765: 0x0080, 0x4768: 0x0080, 0x4769: 0x0080, + 0x476a: 0x0080, 0x476b: 0x0080, 0x476c: 0x0080, 0x476d: 0x0080, 0x476e: 0x0080, 0x476f: 0x0080, + 0x4770: 0x0080, 0x4771: 0x0080, 0x4772: 0x0080, 0x4773: 0x0080, 0x4774: 0x0080, 0x4775: 0x0080, + 0x4776: 0x0080, 0x4777: 0x0080, 0x4778: 0x0080, 0x4779: 0x0080, 0x477a: 0x0080, 0x477b: 0x0080, + 0x477c: 0x0080, 0x477d: 0x0080, 0x477e: 0x0080, 0x477f: 0x0080, + // Block 0x11e, offset 0x4780 + 0x4780: 0x0080, 0x4781: 0x0080, 0x4782: 0x0080, 0x4783: 0x0080, 0x4784: 0x0080, 0x4785: 0x0080, + 0x4786: 0x0080, 0x4787: 0x0080, 0x4788: 0x0080, 0x4789: 0x0080, 0x478a: 0x0080, 0x478b: 0x0080, + 0x478e: 0x0080, 0x478f: 0x0080, 0x4790: 0x0080, 0x4791: 0x0080, + 0x4792: 0x0080, 0x4793: 0x0080, 0x4794: 0x0080, 0x4795: 0x0080, 0x4796: 0x0080, 0x4797: 0x0080, + 0x4798: 0x0080, 0x4799: 0x0080, 0x479a: 0x0080, 0x479b: 
0x0080, 0x479c: 0x0080, 0x479d: 0x0080, + 0x479e: 0x0080, 0x479f: 0x0080, 0x47a0: 0x0080, 0x47a1: 0x0080, 0x47a2: 0x0080, 0x47a3: 0x0080, + 0x47a4: 0x0080, 0x47a5: 0x0080, 0x47a6: 0x0080, 0x47a7: 0x0080, 0x47a8: 0x0080, 0x47a9: 0x0080, + 0x47aa: 0x0080, 0x47ab: 0x0080, 0x47ac: 0x0080, 0x47ad: 0x0080, 0x47ae: 0x0080, 0x47af: 0x0080, + 0x47b0: 0x0080, 0x47b1: 0x0080, 0x47b2: 0x0080, 0x47b3: 0x0080, 0x47b4: 0x0080, 0x47b5: 0x0080, + 0x47b6: 0x0080, 0x47b7: 0x0080, 0x47b8: 0x0080, 0x47b9: 0x0080, 0x47ba: 0x0080, 0x47bb: 0x0080, + 0x47bc: 0x0080, 0x47bd: 0x0080, 0x47be: 0x0080, 0x47bf: 0x0080, + // Block 0x11f, offset 0x47c0 + 0x47c0: 0x00c3, 0x47c1: 0x00c3, 0x47c2: 0x00c3, 0x47c3: 0x00c3, 0x47c4: 0x00c3, 0x47c5: 0x00c3, + 0x47c6: 0x00c3, 0x47c7: 0x00c3, 0x47c8: 0x00c3, 0x47c9: 0x00c3, 0x47ca: 0x00c3, 0x47cb: 0x00c3, + 0x47cc: 0x00c3, 0x47cd: 0x00c3, 0x47ce: 0x00c3, 0x47cf: 0x00c3, 0x47d0: 0x00c3, 0x47d1: 0x00c3, + 0x47d2: 0x00c3, 0x47d3: 0x00c3, 0x47d4: 0x00c3, 0x47d5: 0x00c3, 0x47d6: 0x00c3, 0x47d7: 0x00c3, + 0x47d8: 0x00c3, 0x47d9: 0x00c3, 0x47da: 0x00c3, 0x47db: 0x00c3, 0x47dc: 0x00c3, 0x47dd: 0x00c3, + 0x47de: 0x00c3, 0x47df: 0x00c3, 0x47e0: 0x00c3, 0x47e1: 0x00c3, 0x47e2: 0x00c3, 0x47e3: 0x00c3, + 0x47e4: 0x00c3, 0x47e5: 0x00c3, 0x47e6: 0x00c3, 0x47e7: 0x00c3, 0x47e8: 0x00c3, 0x47e9: 0x00c3, + 0x47ea: 0x00c3, 0x47eb: 0x00c3, 0x47ec: 0x00c3, 0x47ed: 0x00c3, 0x47ee: 0x00c3, 0x47ef: 0x00c3, + 0x47f0: 0x00c3, 0x47f1: 0x00c3, 0x47f2: 0x00c3, 0x47f3: 0x00c3, 0x47f4: 0x00c3, 0x47f5: 0x00c3, + 0x47f6: 0x00c3, 0x47f7: 0x0080, 0x47f8: 0x0080, 0x47f9: 0x0080, 0x47fa: 0x0080, 0x47fb: 0x00c3, + 0x47fc: 0x00c3, 0x47fd: 0x00c3, 0x47fe: 0x00c3, 0x47ff: 0x00c3, + // Block 0x120, offset 0x4800 + 0x4800: 0x00c3, 0x4801: 0x00c3, 0x4802: 0x00c3, 0x4803: 0x00c3, 0x4804: 0x00c3, 0x4805: 0x00c3, + 0x4806: 0x00c3, 0x4807: 0x00c3, 0x4808: 0x00c3, 0x4809: 0x00c3, 0x480a: 0x00c3, 0x480b: 0x00c3, + 0x480c: 0x00c3, 0x480d: 0x00c3, 0x480e: 0x00c3, 0x480f: 0x00c3, 0x4810: 0x00c3, 0x4811: 0x00c3, + 0x4812: 0x00c3, 0x4813: 0x00c3, 0x4814: 0x00c3, 0x4815: 0x00c3, 0x4816: 0x00c3, 0x4817: 0x00c3, + 0x4818: 0x00c3, 0x4819: 0x00c3, 0x481a: 0x00c3, 0x481b: 0x00c3, 0x481c: 0x00c3, 0x481d: 0x00c3, + 0x481e: 0x00c3, 0x481f: 0x00c3, 0x4820: 0x00c3, 0x4821: 0x00c3, 0x4822: 0x00c3, 0x4823: 0x00c3, + 0x4824: 0x00c3, 0x4825: 0x00c3, 0x4826: 0x00c3, 0x4827: 0x00c3, 0x4828: 0x00c3, 0x4829: 0x00c3, + 0x482a: 0x00c3, 0x482b: 0x00c3, 0x482c: 0x00c3, 0x482d: 0x0080, 0x482e: 0x0080, 0x482f: 0x0080, + 0x4830: 0x0080, 0x4831: 0x0080, 0x4832: 0x0080, 0x4833: 0x0080, 0x4834: 0x0080, 0x4835: 0x00c3, + 0x4836: 0x0080, 0x4837: 0x0080, 0x4838: 0x0080, 0x4839: 0x0080, 0x483a: 0x0080, 0x483b: 0x0080, + 0x483c: 0x0080, 0x483d: 0x0080, 0x483e: 0x0080, 0x483f: 0x0080, + // Block 0x121, offset 0x4840 + 0x4840: 0x0080, 0x4841: 0x0080, 0x4842: 0x0080, 0x4843: 0x0080, 0x4844: 0x00c3, 0x4845: 0x0080, + 0x4846: 0x0080, 0x4847: 0x0080, 0x4848: 0x0080, 0x4849: 0x0080, 0x484a: 0x0080, 0x484b: 0x0080, + 0x485b: 0x00c3, 0x485c: 0x00c3, 0x485d: 0x00c3, + 0x485e: 0x00c3, 0x485f: 0x00c3, 0x4861: 0x00c3, 0x4862: 0x00c3, 0x4863: 0x00c3, + 0x4864: 0x00c3, 0x4865: 0x00c3, 0x4866: 0x00c3, 0x4867: 0x00c3, 0x4868: 0x00c3, 0x4869: 0x00c3, + 0x486a: 0x00c3, 0x486b: 0x00c3, 0x486c: 0x00c3, 0x486d: 0x00c3, 0x486e: 0x00c3, 0x486f: 0x00c3, + // Block 0x122, offset 0x4880 + 0x4880: 0x00c3, 0x4881: 0x00c3, 0x4882: 0x00c3, 0x4883: 0x00c3, 0x4884: 0x00c3, 0x4885: 0x00c3, + 0x4886: 0x00c3, 0x4888: 0x00c3, 0x4889: 0x00c3, 0x488a: 0x00c3, 0x488b: 0x00c3, + 0x488c: 0x00c3, 0x488d: 0x00c3, 
0x488e: 0x00c3, 0x488f: 0x00c3, 0x4890: 0x00c3, 0x4891: 0x00c3, + 0x4892: 0x00c3, 0x4893: 0x00c3, 0x4894: 0x00c3, 0x4895: 0x00c3, 0x4896: 0x00c3, 0x4897: 0x00c3, + 0x4898: 0x00c3, 0x489b: 0x00c3, 0x489c: 0x00c3, 0x489d: 0x00c3, + 0x489e: 0x00c3, 0x489f: 0x00c3, 0x48a0: 0x00c3, 0x48a1: 0x00c3, 0x48a3: 0x00c3, + 0x48a4: 0x00c3, 0x48a6: 0x00c3, 0x48a7: 0x00c3, 0x48a8: 0x00c3, 0x48a9: 0x00c3, + 0x48aa: 0x00c3, + // Block 0x123, offset 0x48c0 + 0x48c0: 0x00c0, 0x48c1: 0x00c0, 0x48c2: 0x00c0, 0x48c3: 0x00c0, 0x48c4: 0x00c0, + 0x48c7: 0x0080, 0x48c8: 0x0080, 0x48c9: 0x0080, 0x48ca: 0x0080, 0x48cb: 0x0080, + 0x48cc: 0x0080, 0x48cd: 0x0080, 0x48ce: 0x0080, 0x48cf: 0x0080, 0x48d0: 0x00c3, 0x48d1: 0x00c3, + 0x48d2: 0x00c3, 0x48d3: 0x00c3, 0x48d4: 0x00c3, 0x48d5: 0x00c3, 0x48d6: 0x00c3, + // Block 0x124, offset 0x4900 + 0x4900: 0x00c2, 0x4901: 0x00c2, 0x4902: 0x00c2, 0x4903: 0x00c2, 0x4904: 0x00c2, 0x4905: 0x00c2, + 0x4906: 0x00c2, 0x4907: 0x00c2, 0x4908: 0x00c2, 0x4909: 0x00c2, 0x490a: 0x00c2, 0x490b: 0x00c2, + 0x490c: 0x00c2, 0x490d: 0x00c2, 0x490e: 0x00c2, 0x490f: 0x00c2, 0x4910: 0x00c2, 0x4911: 0x00c2, + 0x4912: 0x00c2, 0x4913: 0x00c2, 0x4914: 0x00c2, 0x4915: 0x00c2, 0x4916: 0x00c2, 0x4917: 0x00c2, + 0x4918: 0x00c2, 0x4919: 0x00c2, 0x491a: 0x00c2, 0x491b: 0x00c2, 0x491c: 0x00c2, 0x491d: 0x00c2, + 0x491e: 0x00c2, 0x491f: 0x00c2, 0x4920: 0x00c2, 0x4921: 0x00c2, 0x4922: 0x00c2, 0x4923: 0x00c2, + 0x4924: 0x00c2, 0x4925: 0x00c2, 0x4926: 0x00c2, 0x4927: 0x00c2, 0x4928: 0x00c2, 0x4929: 0x00c2, + 0x492a: 0x00c2, 0x492b: 0x00c2, 0x492c: 0x00c2, 0x492d: 0x00c2, 0x492e: 0x00c2, 0x492f: 0x00c2, + 0x4930: 0x00c2, 0x4931: 0x00c2, 0x4932: 0x00c2, 0x4933: 0x00c2, 0x4934: 0x00c2, 0x4935: 0x00c2, + 0x4936: 0x00c2, 0x4937: 0x00c2, 0x4938: 0x00c2, 0x4939: 0x00c2, 0x493a: 0x00c2, 0x493b: 0x00c2, + 0x493c: 0x00c2, 0x493d: 0x00c2, 0x493e: 0x00c2, 0x493f: 0x00c2, + // Block 0x125, offset 0x4940 + 0x4940: 0x00c2, 0x4941: 0x00c2, 0x4942: 0x00c2, 0x4943: 0x00c2, 0x4944: 0x00c3, 0x4945: 0x00c3, + 0x4946: 0x00c3, 0x4947: 0x00c3, 0x4948: 0x00c3, 0x4949: 0x00c3, 0x494a: 0x00c3, + 0x4950: 0x00c0, 0x4951: 0x00c0, + 0x4952: 0x00c0, 0x4953: 0x00c0, 0x4954: 0x00c0, 0x4955: 0x00c0, 0x4956: 0x00c0, 0x4957: 0x00c0, + 0x4958: 0x00c0, 0x4959: 0x00c0, + 0x495e: 0x0080, 0x495f: 0x0080, + // Block 0x126, offset 0x4980 + 0x4980: 0x0080, 0x4981: 0x0080, 0x4982: 0x0080, 0x4983: 0x0080, 0x4985: 0x0080, + 0x4986: 0x0080, 0x4987: 0x0080, 0x4988: 0x0080, 0x4989: 0x0080, 0x498a: 0x0080, 0x498b: 0x0080, + 0x498c: 0x0080, 0x498d: 0x0080, 0x498e: 0x0080, 0x498f: 0x0080, 0x4990: 0x0080, 0x4991: 0x0080, + 0x4992: 0x0080, 0x4993: 0x0080, 0x4994: 0x0080, 0x4995: 0x0080, 0x4996: 0x0080, 0x4997: 0x0080, + 0x4998: 0x0080, 0x4999: 0x0080, 0x499a: 0x0080, 0x499b: 0x0080, 0x499c: 0x0080, 0x499d: 0x0080, + 0x499e: 0x0080, 0x499f: 0x0080, 0x49a1: 0x0080, 0x49a2: 0x0080, + 0x49a4: 0x0080, 0x49a7: 0x0080, 0x49a9: 0x0080, + 0x49aa: 0x0080, 0x49ab: 0x0080, 0x49ac: 0x0080, 0x49ad: 0x0080, 0x49ae: 0x0080, 0x49af: 0x0080, + 0x49b0: 0x0080, 0x49b1: 0x0080, 0x49b2: 0x0080, 0x49b4: 0x0080, 0x49b5: 0x0080, + 0x49b6: 0x0080, 0x49b7: 0x0080, 0x49b9: 0x0080, 0x49bb: 0x0080, + // Block 0x127, offset 0x49c0 + 0x49c2: 0x0080, + 0x49c7: 0x0080, 0x49c9: 0x0080, 0x49cb: 0x0080, + 0x49cd: 0x0080, 0x49ce: 0x0080, 0x49cf: 0x0080, 0x49d1: 0x0080, + 0x49d2: 0x0080, 0x49d4: 0x0080, 0x49d7: 0x0080, + 0x49d9: 0x0080, 0x49db: 0x0080, 0x49dd: 0x0080, + 0x49df: 0x0080, 0x49e1: 0x0080, 0x49e2: 0x0080, + 0x49e4: 0x0080, 0x49e7: 0x0080, 0x49e8: 0x0080, 0x49e9: 0x0080, + 0x49ea: 0x0080, 0x49ec: 
0x0080, 0x49ed: 0x0080, 0x49ee: 0x0080, 0x49ef: 0x0080, + 0x49f0: 0x0080, 0x49f1: 0x0080, 0x49f2: 0x0080, 0x49f4: 0x0080, 0x49f5: 0x0080, + 0x49f6: 0x0080, 0x49f7: 0x0080, 0x49f9: 0x0080, 0x49fa: 0x0080, 0x49fb: 0x0080, + 0x49fc: 0x0080, 0x49fe: 0x0080, + // Block 0x128, offset 0x4a00 + 0x4a00: 0x0080, 0x4a01: 0x0080, 0x4a02: 0x0080, 0x4a03: 0x0080, 0x4a04: 0x0080, 0x4a05: 0x0080, + 0x4a06: 0x0080, 0x4a07: 0x0080, 0x4a08: 0x0080, 0x4a09: 0x0080, 0x4a0b: 0x0080, + 0x4a0c: 0x0080, 0x4a0d: 0x0080, 0x4a0e: 0x0080, 0x4a0f: 0x0080, 0x4a10: 0x0080, 0x4a11: 0x0080, + 0x4a12: 0x0080, 0x4a13: 0x0080, 0x4a14: 0x0080, 0x4a15: 0x0080, 0x4a16: 0x0080, 0x4a17: 0x0080, + 0x4a18: 0x0080, 0x4a19: 0x0080, 0x4a1a: 0x0080, 0x4a1b: 0x0080, + 0x4a21: 0x0080, 0x4a22: 0x0080, 0x4a23: 0x0080, + 0x4a25: 0x0080, 0x4a26: 0x0080, 0x4a27: 0x0080, 0x4a28: 0x0080, 0x4a29: 0x0080, + 0x4a2b: 0x0080, 0x4a2c: 0x0080, 0x4a2d: 0x0080, 0x4a2e: 0x0080, 0x4a2f: 0x0080, + 0x4a30: 0x0080, 0x4a31: 0x0080, 0x4a32: 0x0080, 0x4a33: 0x0080, 0x4a34: 0x0080, 0x4a35: 0x0080, + 0x4a36: 0x0080, 0x4a37: 0x0080, 0x4a38: 0x0080, 0x4a39: 0x0080, 0x4a3a: 0x0080, 0x4a3b: 0x0080, + // Block 0x129, offset 0x4a40 + 0x4a70: 0x0080, 0x4a71: 0x0080, + // Block 0x12a, offset 0x4a80 + 0x4a80: 0x0080, 0x4a81: 0x0080, 0x4a82: 0x0080, 0x4a83: 0x0080, 0x4a84: 0x0080, 0x4a85: 0x0080, + 0x4a86: 0x0080, 0x4a87: 0x0080, 0x4a88: 0x0080, 0x4a89: 0x0080, 0x4a8a: 0x0080, 0x4a8b: 0x0080, + 0x4a8c: 0x0080, 0x4a8d: 0x0080, 0x4a8e: 0x0080, 0x4a8f: 0x0080, 0x4a90: 0x0080, 0x4a91: 0x0080, + 0x4a92: 0x0080, 0x4a93: 0x0080, 0x4a94: 0x0080, 0x4a95: 0x0080, 0x4a96: 0x0080, 0x4a97: 0x0080, + 0x4a98: 0x0080, 0x4a99: 0x0080, 0x4a9a: 0x0080, 0x4a9b: 0x0080, 0x4a9c: 0x0080, 0x4a9d: 0x0080, + 0x4a9e: 0x0080, 0x4a9f: 0x0080, 0x4aa0: 0x0080, 0x4aa1: 0x0080, 0x4aa2: 0x0080, 0x4aa3: 0x0080, + 0x4aa4: 0x0080, 0x4aa5: 0x0080, 0x4aa6: 0x0080, 0x4aa7: 0x0080, 0x4aa8: 0x0080, 0x4aa9: 0x0080, + 0x4aaa: 0x0080, 0x4aab: 0x0080, + 0x4ab0: 0x0080, 0x4ab1: 0x0080, 0x4ab2: 0x0080, 0x4ab3: 0x0080, 0x4ab4: 0x0080, 0x4ab5: 0x0080, + 0x4ab6: 0x0080, 0x4ab7: 0x0080, 0x4ab8: 0x0080, 0x4ab9: 0x0080, 0x4aba: 0x0080, 0x4abb: 0x0080, + 0x4abc: 0x0080, 0x4abd: 0x0080, 0x4abe: 0x0080, 0x4abf: 0x0080, + // Block 0x12b, offset 0x4ac0 + 0x4ac0: 0x0080, 0x4ac1: 0x0080, 0x4ac2: 0x0080, 0x4ac3: 0x0080, 0x4ac4: 0x0080, 0x4ac5: 0x0080, + 0x4ac6: 0x0080, 0x4ac7: 0x0080, 0x4ac8: 0x0080, 0x4ac9: 0x0080, 0x4aca: 0x0080, 0x4acb: 0x0080, + 0x4acc: 0x0080, 0x4acd: 0x0080, 0x4ace: 0x0080, 0x4acf: 0x0080, 0x4ad0: 0x0080, 0x4ad1: 0x0080, + 0x4ad2: 0x0080, 0x4ad3: 0x0080, + 0x4ae0: 0x0080, 0x4ae1: 0x0080, 0x4ae2: 0x0080, 0x4ae3: 0x0080, + 0x4ae4: 0x0080, 0x4ae5: 0x0080, 0x4ae6: 0x0080, 0x4ae7: 0x0080, 0x4ae8: 0x0080, 0x4ae9: 0x0080, + 0x4aea: 0x0080, 0x4aeb: 0x0080, 0x4aec: 0x0080, 0x4aed: 0x0080, 0x4aee: 0x0080, + 0x4af1: 0x0080, 0x4af2: 0x0080, 0x4af3: 0x0080, 0x4af4: 0x0080, 0x4af5: 0x0080, + 0x4af6: 0x0080, 0x4af7: 0x0080, 0x4af8: 0x0080, 0x4af9: 0x0080, 0x4afa: 0x0080, 0x4afb: 0x0080, + 0x4afc: 0x0080, 0x4afd: 0x0080, 0x4afe: 0x0080, 0x4aff: 0x0080, + // Block 0x12c, offset 0x4b00 + 0x4b01: 0x0080, 0x4b02: 0x0080, 0x4b03: 0x0080, 0x4b04: 0x0080, 0x4b05: 0x0080, + 0x4b06: 0x0080, 0x4b07: 0x0080, 0x4b08: 0x0080, 0x4b09: 0x0080, 0x4b0a: 0x0080, 0x4b0b: 0x0080, + 0x4b0c: 0x0080, 0x4b0d: 0x0080, 0x4b0e: 0x0080, 0x4b0f: 0x0080, 0x4b11: 0x0080, + 0x4b12: 0x0080, 0x4b13: 0x0080, 0x4b14: 0x0080, 0x4b15: 0x0080, 0x4b16: 0x0080, 0x4b17: 0x0080, + 0x4b18: 0x0080, 0x4b19: 0x0080, 0x4b1a: 0x0080, 0x4b1b: 0x0080, 0x4b1c: 0x0080, 0x4b1d: 
0x0080, + 0x4b1e: 0x0080, 0x4b1f: 0x0080, 0x4b20: 0x0080, 0x4b21: 0x0080, 0x4b22: 0x0080, 0x4b23: 0x0080, + 0x4b24: 0x0080, 0x4b25: 0x0080, 0x4b26: 0x0080, 0x4b27: 0x0080, 0x4b28: 0x0080, 0x4b29: 0x0080, + 0x4b2a: 0x0080, 0x4b2b: 0x0080, 0x4b2c: 0x0080, 0x4b2d: 0x0080, 0x4b2e: 0x0080, 0x4b2f: 0x0080, + 0x4b30: 0x0080, 0x4b31: 0x0080, 0x4b32: 0x0080, 0x4b33: 0x0080, 0x4b34: 0x0080, 0x4b35: 0x0080, + // Block 0x12d, offset 0x4b40 + 0x4b40: 0x0080, 0x4b41: 0x0080, 0x4b42: 0x0080, 0x4b43: 0x0080, 0x4b44: 0x0080, 0x4b45: 0x0080, + 0x4b46: 0x0080, 0x4b47: 0x0080, 0x4b48: 0x0080, 0x4b49: 0x0080, 0x4b4a: 0x0080, 0x4b4b: 0x0080, + 0x4b4c: 0x0080, 0x4b50: 0x0080, 0x4b51: 0x0080, + 0x4b52: 0x0080, 0x4b53: 0x0080, 0x4b54: 0x0080, 0x4b55: 0x0080, 0x4b56: 0x0080, 0x4b57: 0x0080, + 0x4b58: 0x0080, 0x4b59: 0x0080, 0x4b5a: 0x0080, 0x4b5b: 0x0080, 0x4b5c: 0x0080, 0x4b5d: 0x0080, + 0x4b5e: 0x0080, 0x4b5f: 0x0080, 0x4b60: 0x0080, 0x4b61: 0x0080, 0x4b62: 0x0080, 0x4b63: 0x0080, + 0x4b64: 0x0080, 0x4b65: 0x0080, 0x4b66: 0x0080, 0x4b67: 0x0080, 0x4b68: 0x0080, 0x4b69: 0x0080, + 0x4b6a: 0x0080, 0x4b6b: 0x0080, 0x4b6c: 0x0080, 0x4b6d: 0x0080, 0x4b6e: 0x0080, + 0x4b70: 0x0080, 0x4b71: 0x0080, 0x4b72: 0x0080, 0x4b73: 0x0080, 0x4b74: 0x0080, 0x4b75: 0x0080, + 0x4b76: 0x0080, 0x4b77: 0x0080, 0x4b78: 0x0080, 0x4b79: 0x0080, 0x4b7a: 0x0080, 0x4b7b: 0x0080, + 0x4b7c: 0x0080, 0x4b7d: 0x0080, 0x4b7e: 0x0080, 0x4b7f: 0x0080, + // Block 0x12e, offset 0x4b80 + 0x4b80: 0x0080, 0x4b81: 0x0080, 0x4b82: 0x0080, 0x4b83: 0x0080, 0x4b84: 0x0080, 0x4b85: 0x0080, + 0x4b86: 0x0080, 0x4b87: 0x0080, 0x4b88: 0x0080, 0x4b89: 0x0080, 0x4b8a: 0x0080, 0x4b8b: 0x0080, + 0x4b8c: 0x0080, 0x4b8d: 0x0080, 0x4b8e: 0x0080, 0x4b8f: 0x0080, 0x4b90: 0x0080, 0x4b91: 0x0080, + 0x4b92: 0x0080, 0x4b93: 0x0080, 0x4b94: 0x0080, 0x4b95: 0x0080, 0x4b96: 0x0080, 0x4b97: 0x0080, + 0x4b98: 0x0080, 0x4b99: 0x0080, 0x4b9a: 0x0080, 0x4b9b: 0x0080, 0x4b9c: 0x0080, 0x4b9d: 0x0080, + 0x4b9e: 0x0080, 0x4b9f: 0x0080, 0x4ba0: 0x0080, 0x4ba1: 0x0080, 0x4ba2: 0x0080, 0x4ba3: 0x0080, + 0x4ba4: 0x0080, 0x4ba5: 0x0080, 0x4ba6: 0x0080, 0x4ba7: 0x0080, 0x4ba8: 0x0080, 0x4ba9: 0x0080, + 0x4baa: 0x0080, 0x4bab: 0x0080, 0x4bac: 0x0080, + // Block 0x12f, offset 0x4bc0 + 0x4be6: 0x0080, 0x4be7: 0x0080, 0x4be8: 0x0080, 0x4be9: 0x0080, + 0x4bea: 0x0080, 0x4beb: 0x0080, 0x4bec: 0x0080, 0x4bed: 0x0080, 0x4bee: 0x0080, 0x4bef: 0x0080, + 0x4bf0: 0x0080, 0x4bf1: 0x0080, 0x4bf2: 0x0080, 0x4bf3: 0x0080, 0x4bf4: 0x0080, 0x4bf5: 0x0080, + 0x4bf6: 0x0080, 0x4bf7: 0x0080, 0x4bf8: 0x0080, 0x4bf9: 0x0080, 0x4bfa: 0x0080, 0x4bfb: 0x0080, + 0x4bfc: 0x0080, 0x4bfd: 0x0080, 0x4bfe: 0x0080, 0x4bff: 0x0080, + // Block 0x130, offset 0x4c00 + 0x4c00: 0x008c, 0x4c01: 0x0080, 0x4c02: 0x0080, + 0x4c10: 0x0080, 0x4c11: 0x0080, + 0x4c12: 0x0080, 0x4c13: 0x0080, 0x4c14: 0x0080, 0x4c15: 0x0080, 0x4c16: 0x0080, 0x4c17: 0x0080, + 0x4c18: 0x0080, 0x4c19: 0x0080, 0x4c1a: 0x0080, 0x4c1b: 0x0080, 0x4c1c: 0x0080, 0x4c1d: 0x0080, + 0x4c1e: 0x0080, 0x4c1f: 0x0080, 0x4c20: 0x0080, 0x4c21: 0x0080, 0x4c22: 0x0080, 0x4c23: 0x0080, + 0x4c24: 0x0080, 0x4c25: 0x0080, 0x4c26: 0x0080, 0x4c27: 0x0080, 0x4c28: 0x0080, 0x4c29: 0x0080, + 0x4c2a: 0x0080, 0x4c2b: 0x0080, 0x4c2c: 0x0080, 0x4c2d: 0x0080, 0x4c2e: 0x0080, 0x4c2f: 0x0080, + 0x4c30: 0x0080, 0x4c31: 0x0080, 0x4c32: 0x0080, 0x4c33: 0x0080, 0x4c34: 0x0080, 0x4c35: 0x0080, + 0x4c36: 0x0080, 0x4c37: 0x0080, 0x4c38: 0x0080, 0x4c39: 0x0080, 0x4c3a: 0x0080, 0x4c3b: 0x0080, + // Block 0x131, offset 0x4c40 + 0x4c40: 0x0080, 0x4c41: 0x0080, 0x4c42: 0x0080, 0x4c43: 0x0080, 0x4c44: 
0x0080, 0x4c45: 0x0080, + 0x4c46: 0x0080, 0x4c47: 0x0080, 0x4c48: 0x0080, + 0x4c50: 0x0080, 0x4c51: 0x0080, + // Block 0x132, offset 0x4c80 + 0x4c80: 0x0080, 0x4c81: 0x0080, 0x4c82: 0x0080, 0x4c83: 0x0080, 0x4c84: 0x0080, 0x4c85: 0x0080, + 0x4c86: 0x0080, 0x4c87: 0x0080, 0x4c88: 0x0080, 0x4c89: 0x0080, 0x4c8a: 0x0080, 0x4c8b: 0x0080, + 0x4c8c: 0x0080, 0x4c8d: 0x0080, 0x4c8e: 0x0080, 0x4c8f: 0x0080, 0x4c90: 0x0080, 0x4c91: 0x0080, + 0x4c92: 0x0080, + 0x4ca0: 0x0080, 0x4ca1: 0x0080, 0x4ca2: 0x0080, 0x4ca3: 0x0080, + 0x4ca4: 0x0080, 0x4ca5: 0x0080, 0x4ca6: 0x0080, 0x4ca7: 0x0080, 0x4ca8: 0x0080, 0x4ca9: 0x0080, + 0x4caa: 0x0080, 0x4cab: 0x0080, 0x4cac: 0x0080, + 0x4cb0: 0x0080, 0x4cb1: 0x0080, 0x4cb2: 0x0080, 0x4cb3: 0x0080, 0x4cb4: 0x0080, 0x4cb5: 0x0080, + 0x4cb6: 0x0080, + // Block 0x133, offset 0x4cc0 + 0x4cc0: 0x0080, 0x4cc1: 0x0080, 0x4cc2: 0x0080, 0x4cc3: 0x0080, 0x4cc4: 0x0080, 0x4cc5: 0x0080, + 0x4cc6: 0x0080, 0x4cc7: 0x0080, 0x4cc8: 0x0080, 0x4cc9: 0x0080, 0x4cca: 0x0080, 0x4ccb: 0x0080, + 0x4ccc: 0x0080, 0x4ccd: 0x0080, 0x4cce: 0x0080, 0x4ccf: 0x0080, 0x4cd0: 0x0080, 0x4cd1: 0x0080, + 0x4cd2: 0x0080, 0x4cd3: 0x0080, 0x4cd4: 0x0080, 0x4cd5: 0x0080, 0x4cd6: 0x0080, 0x4cd7: 0x0080, + 0x4cd8: 0x0080, 0x4cd9: 0x0080, 0x4cda: 0x0080, 0x4cdb: 0x0080, 0x4cdc: 0x0080, 0x4cdd: 0x0080, + 0x4cde: 0x0080, 0x4cdf: 0x0080, 0x4ce0: 0x0080, 0x4ce1: 0x0080, 0x4ce2: 0x0080, 0x4ce3: 0x0080, + 0x4ce4: 0x0080, 0x4ce5: 0x0080, 0x4ce6: 0x0080, 0x4ce7: 0x0080, 0x4ce8: 0x0080, 0x4ce9: 0x0080, + 0x4cea: 0x0080, 0x4ceb: 0x0080, 0x4cec: 0x0080, 0x4ced: 0x0080, 0x4cee: 0x0080, 0x4cef: 0x0080, + 0x4cf0: 0x0080, 0x4cf1: 0x0080, 0x4cf2: 0x0080, 0x4cf3: 0x0080, + // Block 0x134, offset 0x4d00 + 0x4d00: 0x0080, 0x4d01: 0x0080, 0x4d02: 0x0080, 0x4d03: 0x0080, 0x4d04: 0x0080, 0x4d05: 0x0080, + 0x4d06: 0x0080, 0x4d07: 0x0080, 0x4d08: 0x0080, 0x4d09: 0x0080, 0x4d0a: 0x0080, 0x4d0b: 0x0080, + 0x4d0c: 0x0080, 0x4d0d: 0x0080, 0x4d0e: 0x0080, 0x4d0f: 0x0080, 0x4d10: 0x0080, 0x4d11: 0x0080, + 0x4d12: 0x0080, 0x4d13: 0x0080, 0x4d14: 0x0080, + // Block 0x135, offset 0x4d40 + 0x4d40: 0x0080, 0x4d41: 0x0080, 0x4d42: 0x0080, 0x4d43: 0x0080, 0x4d44: 0x0080, 0x4d45: 0x0080, + 0x4d46: 0x0080, 0x4d47: 0x0080, 0x4d48: 0x0080, 0x4d49: 0x0080, 0x4d4a: 0x0080, 0x4d4b: 0x0080, + 0x4d50: 0x0080, 0x4d51: 0x0080, + 0x4d52: 0x0080, 0x4d53: 0x0080, 0x4d54: 0x0080, 0x4d55: 0x0080, 0x4d56: 0x0080, 0x4d57: 0x0080, + 0x4d58: 0x0080, 0x4d59: 0x0080, 0x4d5a: 0x0080, 0x4d5b: 0x0080, 0x4d5c: 0x0080, 0x4d5d: 0x0080, + 0x4d5e: 0x0080, 0x4d5f: 0x0080, 0x4d60: 0x0080, 0x4d61: 0x0080, 0x4d62: 0x0080, 0x4d63: 0x0080, + 0x4d64: 0x0080, 0x4d65: 0x0080, 0x4d66: 0x0080, 0x4d67: 0x0080, 0x4d68: 0x0080, 0x4d69: 0x0080, + 0x4d6a: 0x0080, 0x4d6b: 0x0080, 0x4d6c: 0x0080, 0x4d6d: 0x0080, 0x4d6e: 0x0080, 0x4d6f: 0x0080, + 0x4d70: 0x0080, 0x4d71: 0x0080, 0x4d72: 0x0080, 0x4d73: 0x0080, 0x4d74: 0x0080, 0x4d75: 0x0080, + 0x4d76: 0x0080, 0x4d77: 0x0080, 0x4d78: 0x0080, 0x4d79: 0x0080, 0x4d7a: 0x0080, 0x4d7b: 0x0080, + 0x4d7c: 0x0080, 0x4d7d: 0x0080, 0x4d7e: 0x0080, 0x4d7f: 0x0080, + // Block 0x136, offset 0x4d80 + 0x4d80: 0x0080, 0x4d81: 0x0080, 0x4d82: 0x0080, 0x4d83: 0x0080, 0x4d84: 0x0080, 0x4d85: 0x0080, + 0x4d86: 0x0080, 0x4d87: 0x0080, + 0x4d90: 0x0080, 0x4d91: 0x0080, + 0x4d92: 0x0080, 0x4d93: 0x0080, 0x4d94: 0x0080, 0x4d95: 0x0080, 0x4d96: 0x0080, 0x4d97: 0x0080, + 0x4d98: 0x0080, 0x4d99: 0x0080, + 0x4da0: 0x0080, 0x4da1: 0x0080, 0x4da2: 0x0080, 0x4da3: 0x0080, + 0x4da4: 0x0080, 0x4da5: 0x0080, 0x4da6: 0x0080, 0x4da7: 0x0080, 0x4da8: 0x0080, 0x4da9: 0x0080, + 
0x4daa: 0x0080, 0x4dab: 0x0080, 0x4dac: 0x0080, 0x4dad: 0x0080, 0x4dae: 0x0080, 0x4daf: 0x0080, + 0x4db0: 0x0080, 0x4db1: 0x0080, 0x4db2: 0x0080, 0x4db3: 0x0080, 0x4db4: 0x0080, 0x4db5: 0x0080, + 0x4db6: 0x0080, 0x4db7: 0x0080, 0x4db8: 0x0080, 0x4db9: 0x0080, 0x4dba: 0x0080, 0x4dbb: 0x0080, + 0x4dbc: 0x0080, 0x4dbd: 0x0080, 0x4dbe: 0x0080, 0x4dbf: 0x0080, + // Block 0x137, offset 0x4dc0 + 0x4dc0: 0x0080, 0x4dc1: 0x0080, 0x4dc2: 0x0080, 0x4dc3: 0x0080, 0x4dc4: 0x0080, 0x4dc5: 0x0080, + 0x4dc6: 0x0080, 0x4dc7: 0x0080, + 0x4dd0: 0x0080, 0x4dd1: 0x0080, + 0x4dd2: 0x0080, 0x4dd3: 0x0080, 0x4dd4: 0x0080, 0x4dd5: 0x0080, 0x4dd6: 0x0080, 0x4dd7: 0x0080, + 0x4dd8: 0x0080, 0x4dd9: 0x0080, 0x4dda: 0x0080, 0x4ddb: 0x0080, 0x4ddc: 0x0080, 0x4ddd: 0x0080, + 0x4dde: 0x0080, 0x4ddf: 0x0080, 0x4de0: 0x0080, 0x4de1: 0x0080, 0x4de2: 0x0080, 0x4de3: 0x0080, + 0x4de4: 0x0080, 0x4de5: 0x0080, 0x4de6: 0x0080, 0x4de7: 0x0080, 0x4de8: 0x0080, 0x4de9: 0x0080, + 0x4dea: 0x0080, 0x4deb: 0x0080, 0x4dec: 0x0080, 0x4ded: 0x0080, + // Block 0x138, offset 0x4e00 + 0x4e10: 0x0080, 0x4e11: 0x0080, + 0x4e12: 0x0080, 0x4e13: 0x0080, 0x4e14: 0x0080, 0x4e15: 0x0080, 0x4e16: 0x0080, 0x4e17: 0x0080, + 0x4e18: 0x0080, 0x4e19: 0x0080, 0x4e1a: 0x0080, 0x4e1b: 0x0080, 0x4e1c: 0x0080, 0x4e1d: 0x0080, + 0x4e1e: 0x0080, 0x4e20: 0x0080, 0x4e21: 0x0080, 0x4e22: 0x0080, 0x4e23: 0x0080, + 0x4e24: 0x0080, 0x4e25: 0x0080, 0x4e26: 0x0080, 0x4e27: 0x0080, + 0x4e30: 0x0080, 0x4e33: 0x0080, 0x4e34: 0x0080, 0x4e35: 0x0080, + 0x4e36: 0x0080, 0x4e37: 0x0080, 0x4e38: 0x0080, 0x4e39: 0x0080, 0x4e3a: 0x0080, 0x4e3b: 0x0080, + 0x4e3c: 0x0080, 0x4e3d: 0x0080, 0x4e3e: 0x0080, + // Block 0x139, offset 0x4e40 + 0x4e40: 0x0080, 0x4e41: 0x0080, 0x4e42: 0x0080, 0x4e43: 0x0080, 0x4e44: 0x0080, 0x4e45: 0x0080, + 0x4e46: 0x0080, 0x4e47: 0x0080, 0x4e48: 0x0080, 0x4e49: 0x0080, 0x4e4a: 0x0080, 0x4e4b: 0x0080, + 0x4e50: 0x0080, 0x4e51: 0x0080, + 0x4e52: 0x0080, 0x4e53: 0x0080, 0x4e54: 0x0080, 0x4e55: 0x0080, 0x4e56: 0x0080, 0x4e57: 0x0080, + 0x4e58: 0x0080, 0x4e59: 0x0080, 0x4e5a: 0x0080, 0x4e5b: 0x0080, 0x4e5c: 0x0080, 0x4e5d: 0x0080, + 0x4e5e: 0x0080, + // Block 0x13a, offset 0x4e80 + 0x4e80: 0x0080, 0x4e81: 0x0080, 0x4e82: 0x0080, 0x4e83: 0x0080, 0x4e84: 0x0080, 0x4e85: 0x0080, + 0x4e86: 0x0080, 0x4e87: 0x0080, 0x4e88: 0x0080, 0x4e89: 0x0080, 0x4e8a: 0x0080, 0x4e8b: 0x0080, + 0x4e8c: 0x0080, 0x4e8d: 0x0080, 0x4e8e: 0x0080, 0x4e8f: 0x0080, 0x4e90: 0x0080, 0x4e91: 0x0080, + // Block 0x13b, offset 0x4ec0 + 0x4ec0: 0x0080, + // Block 0x13c, offset 0x4f00 + 0x4f00: 0x00cc, 0x4f01: 0x00cc, 0x4f02: 0x00cc, 0x4f03: 0x00cc, 0x4f04: 0x00cc, 0x4f05: 0x00cc, + 0x4f06: 0x00cc, 0x4f07: 0x00cc, 0x4f08: 0x00cc, 0x4f09: 0x00cc, 0x4f0a: 0x00cc, 0x4f0b: 0x00cc, + 0x4f0c: 0x00cc, 0x4f0d: 0x00cc, 0x4f0e: 0x00cc, 0x4f0f: 0x00cc, 0x4f10: 0x00cc, 0x4f11: 0x00cc, + 0x4f12: 0x00cc, 0x4f13: 0x00cc, 0x4f14: 0x00cc, 0x4f15: 0x00cc, 0x4f16: 0x00cc, + // Block 0x13d, offset 0x4f40 + 0x4f40: 0x00cc, 0x4f41: 0x00cc, 0x4f42: 0x00cc, 0x4f43: 0x00cc, 0x4f44: 0x00cc, 0x4f45: 0x00cc, + 0x4f46: 0x00cc, 0x4f47: 0x00cc, 0x4f48: 0x00cc, 0x4f49: 0x00cc, 0x4f4a: 0x00cc, 0x4f4b: 0x00cc, + 0x4f4c: 0x00cc, 0x4f4d: 0x00cc, 0x4f4e: 0x00cc, 0x4f4f: 0x00cc, 0x4f50: 0x00cc, 0x4f51: 0x00cc, + 0x4f52: 0x00cc, 0x4f53: 0x00cc, 0x4f54: 0x00cc, 0x4f55: 0x00cc, 0x4f56: 0x00cc, 0x4f57: 0x00cc, + 0x4f58: 0x00cc, 0x4f59: 0x00cc, 0x4f5a: 0x00cc, 0x4f5b: 0x00cc, 0x4f5c: 0x00cc, 0x4f5d: 0x00cc, + 0x4f5e: 0x00cc, 0x4f5f: 0x00cc, 0x4f60: 0x00cc, 0x4f61: 0x00cc, 0x4f62: 0x00cc, 0x4f63: 0x00cc, + 0x4f64: 0x00cc, 0x4f65: 0x00cc, 
0x4f66: 0x00cc, 0x4f67: 0x00cc, 0x4f68: 0x00cc, 0x4f69: 0x00cc, + 0x4f6a: 0x00cc, 0x4f6b: 0x00cc, 0x4f6c: 0x00cc, 0x4f6d: 0x00cc, 0x4f6e: 0x00cc, 0x4f6f: 0x00cc, + 0x4f70: 0x00cc, 0x4f71: 0x00cc, 0x4f72: 0x00cc, 0x4f73: 0x00cc, 0x4f74: 0x00cc, + // Block 0x13e, offset 0x4f80 + 0x4f80: 0x00cc, 0x4f81: 0x00cc, 0x4f82: 0x00cc, 0x4f83: 0x00cc, 0x4f84: 0x00cc, 0x4f85: 0x00cc, + 0x4f86: 0x00cc, 0x4f87: 0x00cc, 0x4f88: 0x00cc, 0x4f89: 0x00cc, 0x4f8a: 0x00cc, 0x4f8b: 0x00cc, + 0x4f8c: 0x00cc, 0x4f8d: 0x00cc, 0x4f8e: 0x00cc, 0x4f8f: 0x00cc, 0x4f90: 0x00cc, 0x4f91: 0x00cc, + 0x4f92: 0x00cc, 0x4f93: 0x00cc, 0x4f94: 0x00cc, 0x4f95: 0x00cc, 0x4f96: 0x00cc, 0x4f97: 0x00cc, + 0x4f98: 0x00cc, 0x4f99: 0x00cc, 0x4f9a: 0x00cc, 0x4f9b: 0x00cc, 0x4f9c: 0x00cc, 0x4f9d: 0x00cc, + 0x4fa0: 0x00cc, 0x4fa1: 0x00cc, 0x4fa2: 0x00cc, 0x4fa3: 0x00cc, + 0x4fa4: 0x00cc, 0x4fa5: 0x00cc, 0x4fa6: 0x00cc, 0x4fa7: 0x00cc, 0x4fa8: 0x00cc, 0x4fa9: 0x00cc, + 0x4faa: 0x00cc, 0x4fab: 0x00cc, 0x4fac: 0x00cc, 0x4fad: 0x00cc, 0x4fae: 0x00cc, 0x4faf: 0x00cc, + 0x4fb0: 0x00cc, 0x4fb1: 0x00cc, 0x4fb2: 0x00cc, 0x4fb3: 0x00cc, 0x4fb4: 0x00cc, 0x4fb5: 0x00cc, + 0x4fb6: 0x00cc, 0x4fb7: 0x00cc, 0x4fb8: 0x00cc, 0x4fb9: 0x00cc, 0x4fba: 0x00cc, 0x4fbb: 0x00cc, + 0x4fbc: 0x00cc, 0x4fbd: 0x00cc, 0x4fbe: 0x00cc, 0x4fbf: 0x00cc, + // Block 0x13f, offset 0x4fc0 + 0x4fc0: 0x00cc, 0x4fc1: 0x00cc, 0x4fc2: 0x00cc, 0x4fc3: 0x00cc, 0x4fc4: 0x00cc, 0x4fc5: 0x00cc, + 0x4fc6: 0x00cc, 0x4fc7: 0x00cc, 0x4fc8: 0x00cc, 0x4fc9: 0x00cc, 0x4fca: 0x00cc, 0x4fcb: 0x00cc, + 0x4fcc: 0x00cc, 0x4fcd: 0x00cc, 0x4fce: 0x00cc, 0x4fcf: 0x00cc, 0x4fd0: 0x00cc, 0x4fd1: 0x00cc, + 0x4fd2: 0x00cc, 0x4fd3: 0x00cc, 0x4fd4: 0x00cc, 0x4fd5: 0x00cc, 0x4fd6: 0x00cc, 0x4fd7: 0x00cc, + 0x4fd8: 0x00cc, 0x4fd9: 0x00cc, 0x4fda: 0x00cc, 0x4fdb: 0x00cc, 0x4fdc: 0x00cc, 0x4fdd: 0x00cc, + 0x4fde: 0x00cc, 0x4fdf: 0x00cc, 0x4fe0: 0x00cc, 0x4fe1: 0x00cc, + // Block 0x140, offset 0x5000 + 0x5000: 0x008c, 0x5001: 0x008c, 0x5002: 0x008c, 0x5003: 0x008c, 0x5004: 0x008c, 0x5005: 0x008c, + 0x5006: 0x008c, 0x5007: 0x008c, 0x5008: 0x008c, 0x5009: 0x008c, 0x500a: 0x008c, 0x500b: 0x008c, + 0x500c: 0x008c, 0x500d: 0x008c, 0x500e: 0x008c, 0x500f: 0x008c, 0x5010: 0x008c, 0x5011: 0x008c, + 0x5012: 0x008c, 0x5013: 0x008c, 0x5014: 0x008c, 0x5015: 0x008c, 0x5016: 0x008c, 0x5017: 0x008c, + 0x5018: 0x008c, 0x5019: 0x008c, 0x501a: 0x008c, 0x501b: 0x008c, 0x501c: 0x008c, 0x501d: 0x008c, + // Block 0x141, offset 0x5040 + 0x5041: 0x0040, + 0x5060: 0x0040, 0x5061: 0x0040, 0x5062: 0x0040, 0x5063: 0x0040, + 0x5064: 0x0040, 0x5065: 0x0040, 0x5066: 0x0040, 0x5067: 0x0040, 0x5068: 0x0040, 0x5069: 0x0040, + 0x506a: 0x0040, 0x506b: 0x0040, 0x506c: 0x0040, 0x506d: 0x0040, 0x506e: 0x0040, 0x506f: 0x0040, + 0x5070: 0x0040, 0x5071: 0x0040, 0x5072: 0x0040, 0x5073: 0x0040, 0x5074: 0x0040, 0x5075: 0x0040, + 0x5076: 0x0040, 0x5077: 0x0040, 0x5078: 0x0040, 0x5079: 0x0040, 0x507a: 0x0040, 0x507b: 0x0040, + 0x507c: 0x0040, 0x507d: 0x0040, 0x507e: 0x0040, 0x507f: 0x0040, + // Block 0x142, offset 0x5080 + 0x5080: 0x0040, 0x5081: 0x0040, 0x5082: 0x0040, 0x5083: 0x0040, 0x5084: 0x0040, 0x5085: 0x0040, + 0x5086: 0x0040, 0x5087: 0x0040, 0x5088: 0x0040, 0x5089: 0x0040, 0x508a: 0x0040, 0x508b: 0x0040, + 0x508c: 0x0040, 0x508d: 0x0040, 0x508e: 0x0040, 0x508f: 0x0040, 0x5090: 0x0040, 0x5091: 0x0040, + 0x5092: 0x0040, 0x5093: 0x0040, 0x5094: 0x0040, 0x5095: 0x0040, 0x5096: 0x0040, 0x5097: 0x0040, + 0x5098: 0x0040, 0x5099: 0x0040, 0x509a: 0x0040, 0x509b: 0x0040, 0x509c: 0x0040, 0x509d: 0x0040, + 0x509e: 0x0040, 0x509f: 0x0040, 0x50a0: 0x0040, 0x50a1: 
0x0040, 0x50a2: 0x0040, 0x50a3: 0x0040, + 0x50a4: 0x0040, 0x50a5: 0x0040, 0x50a6: 0x0040, 0x50a7: 0x0040, 0x50a8: 0x0040, 0x50a9: 0x0040, + 0x50aa: 0x0040, 0x50ab: 0x0040, 0x50ac: 0x0040, 0x50ad: 0x0040, 0x50ae: 0x0040, 0x50af: 0x0040, + // Block 0x143, offset 0x50c0 + 0x50c0: 0x0040, 0x50c1: 0x0040, 0x50c2: 0x0040, 0x50c3: 0x0040, 0x50c4: 0x0040, 0x50c5: 0x0040, + 0x50c6: 0x0040, 0x50c7: 0x0040, 0x50c8: 0x0040, 0x50c9: 0x0040, 0x50ca: 0x0040, 0x50cb: 0x0040, + 0x50cc: 0x0040, 0x50cd: 0x0040, 0x50ce: 0x0040, 0x50cf: 0x0040, 0x50d0: 0x0040, 0x50d1: 0x0040, + 0x50d2: 0x0040, 0x50d3: 0x0040, 0x50d4: 0x0040, 0x50d5: 0x0040, 0x50d6: 0x0040, 0x50d7: 0x0040, + 0x50d8: 0x0040, 0x50d9: 0x0040, 0x50da: 0x0040, 0x50db: 0x0040, 0x50dc: 0x0040, 0x50dd: 0x0040, + 0x50de: 0x0040, 0x50df: 0x0040, 0x50e0: 0x0040, 0x50e1: 0x0040, 0x50e2: 0x0040, 0x50e3: 0x0040, + 0x50e4: 0x0040, 0x50e5: 0x0040, 0x50e6: 0x0040, 0x50e7: 0x0040, 0x50e8: 0x0040, 0x50e9: 0x0040, + 0x50ea: 0x0040, 0x50eb: 0x0040, 0x50ec: 0x0040, 0x50ed: 0x0040, 0x50ee: 0x0040, 0x50ef: 0x0040, + 0x50f0: 0x0040, 0x50f1: 0x0040, 0x50f2: 0x0040, 0x50f3: 0x0040, 0x50f4: 0x0040, 0x50f5: 0x0040, + 0x50f6: 0x0040, 0x50f7: 0x0040, 0x50f8: 0x0040, 0x50f9: 0x0040, 0x50fa: 0x0040, 0x50fb: 0x0040, + 0x50fc: 0x0040, 0x50fd: 0x0040, +} + +// derivedPropertiesIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var derivedPropertiesIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc6: 0x05, 0xc7: 0x06, + 0xc8: 0x05, 0xc9: 0x05, 0xca: 0x07, 0xcb: 0x08, 0xcc: 0x09, 0xcd: 0x0a, 0xce: 0x0b, 0xcf: 0x0c, + 0xd0: 0x05, 0xd1: 0x05, 0xd2: 0x0d, 0xd3: 0x05, 0xd4: 0x0e, 0xd5: 0x0f, 0xd6: 0x10, 0xd7: 0x11, + 0xd8: 0x12, 0xd9: 0x13, 0xda: 0x14, 0xdb: 0x15, 0xdc: 0x16, 0xdd: 0x17, 0xde: 0x18, 0xdf: 0x19, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x0a, 0xec: 0x0a, 0xed: 0x0b, 0xee: 0x0c, 0xef: 0x0d, + 0xf0: 0x1d, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x1a, 0x121: 0x1b, 0x122: 0x1c, 0x123: 0x1d, 0x124: 0x1e, 0x125: 0x1f, 0x126: 0x20, 0x127: 0x21, + 0x128: 0x22, 0x129: 0x23, 0x12a: 0x24, 0x12b: 0x25, 0x12c: 0x26, 0x12d: 0x27, 0x12e: 0x28, 0x12f: 0x29, + 0x130: 0x2a, 0x131: 0x2b, 0x132: 0x2c, 0x133: 0x2d, 0x134: 0x2e, 0x135: 0x2f, 0x136: 0x30, 0x137: 0x31, + 0x138: 0x32, 0x139: 0x33, 0x13a: 0x34, 0x13b: 0x35, 0x13c: 0x36, 0x13d: 0x37, 0x13e: 0x38, 0x13f: 0x39, + // Block 0x5, offset 0x140 + 0x140: 0x3a, 0x141: 0x3b, 0x142: 0x3c, 0x143: 0x3d, 0x144: 0x3e, 0x145: 0x3e, 0x146: 0x3e, 0x147: 0x3e, + 0x148: 0x05, 0x149: 0x3f, 0x14a: 0x40, 0x14b: 0x41, 0x14c: 0x42, 0x14d: 0x43, 0x14e: 0x44, 0x14f: 0x45, + 0x150: 0x46, 0x151: 0x05, 0x152: 0x05, 0x153: 0x05, 0x154: 0x05, 0x155: 0x05, 0x156: 0x05, 0x157: 0x05, + 0x158: 0x05, 0x159: 0x47, 0x15a: 0x48, 0x15b: 0x49, 0x15c: 0x4a, 0x15d: 0x4b, 0x15e: 0x4c, 0x15f: 0x4d, + 0x160: 0x4e, 0x161: 0x4f, 0x162: 0x50, 0x163: 0x51, 0x164: 0x52, 0x165: 0x53, 0x166: 0x54, 0x167: 0x55, + 0x168: 0x56, 0x169: 0x57, 0x16a: 0x58, 0x16c: 0x59, 0x16d: 0x5a, 0x16e: 0x5b, 0x16f: 0x5c, + 0x170: 0x5d, 0x171: 0x5e, 0x172: 0x5f, 0x173: 0x60, 0x174: 0x61, 0x175: 0x62, 0x176: 0x63, 0x177: 0x64, + 0x178: 0x05, 0x179: 0x05, 0x17a: 0x65, 0x17b: 0x05, 0x17c: 0x66, 0x17d: 0x67, 0x17e: 0x68, 0x17f: 0x69, + // Block 0x6, offset 0x180 + 0x180: 0x6a, 0x181: 0x6b, 0x182: 0x6c, 0x183: 0x6d, 0x184: 0x6e, 
0x185: 0x6f, 0x186: 0x70, 0x187: 0x71, + 0x188: 0x71, 0x189: 0x71, 0x18a: 0x71, 0x18b: 0x71, 0x18c: 0x71, 0x18d: 0x71, 0x18e: 0x71, 0x18f: 0x72, + 0x190: 0x73, 0x191: 0x74, 0x192: 0x71, 0x193: 0x71, 0x194: 0x71, 0x195: 0x71, 0x196: 0x71, 0x197: 0x71, + 0x198: 0x71, 0x199: 0x71, 0x19a: 0x71, 0x19b: 0x71, 0x19c: 0x71, 0x19d: 0x71, 0x19e: 0x71, 0x19f: 0x71, + 0x1a0: 0x71, 0x1a1: 0x71, 0x1a2: 0x71, 0x1a3: 0x71, 0x1a4: 0x71, 0x1a5: 0x71, 0x1a6: 0x71, 0x1a7: 0x71, + 0x1a8: 0x71, 0x1a9: 0x71, 0x1aa: 0x71, 0x1ab: 0x71, 0x1ac: 0x71, 0x1ad: 0x75, 0x1ae: 0x76, 0x1af: 0x77, + 0x1b0: 0x78, 0x1b1: 0x79, 0x1b2: 0x05, 0x1b3: 0x7a, 0x1b4: 0x7b, 0x1b5: 0x7c, 0x1b6: 0x7d, 0x1b7: 0x7e, + 0x1b8: 0x7f, 0x1b9: 0x80, 0x1ba: 0x81, 0x1bb: 0x82, 0x1bc: 0x83, 0x1bd: 0x83, 0x1be: 0x83, 0x1bf: 0x84, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x85, 0x1c1: 0x86, 0x1c2: 0x87, 0x1c3: 0x88, 0x1c4: 0x89, 0x1c5: 0x8a, 0x1c6: 0x8b, 0x1c7: 0x8c, + 0x1c8: 0x8d, 0x1c9: 0x71, 0x1ca: 0x71, 0x1cb: 0x8e, 0x1cc: 0x83, 0x1cd: 0x8f, 0x1ce: 0x71, 0x1cf: 0x71, + 0x1d0: 0x90, 0x1d1: 0x90, 0x1d2: 0x90, 0x1d3: 0x90, 0x1d4: 0x90, 0x1d5: 0x90, 0x1d6: 0x90, 0x1d7: 0x90, + 0x1d8: 0x90, 0x1d9: 0x90, 0x1da: 0x90, 0x1db: 0x90, 0x1dc: 0x90, 0x1dd: 0x90, 0x1de: 0x90, 0x1df: 0x90, + 0x1e0: 0x90, 0x1e1: 0x90, 0x1e2: 0x90, 0x1e3: 0x90, 0x1e4: 0x90, 0x1e5: 0x90, 0x1e6: 0x90, 0x1e7: 0x90, + 0x1e8: 0x90, 0x1e9: 0x90, 0x1ea: 0x90, 0x1eb: 0x90, 0x1ec: 0x90, 0x1ed: 0x90, 0x1ee: 0x90, 0x1ef: 0x90, + 0x1f0: 0x90, 0x1f1: 0x90, 0x1f2: 0x90, 0x1f3: 0x90, 0x1f4: 0x90, 0x1f5: 0x90, 0x1f6: 0x90, 0x1f7: 0x90, + 0x1f8: 0x90, 0x1f9: 0x90, 0x1fa: 0x90, 0x1fb: 0x90, 0x1fc: 0x90, 0x1fd: 0x90, 0x1fe: 0x90, 0x1ff: 0x90, + // Block 0x8, offset 0x200 + 0x200: 0x90, 0x201: 0x90, 0x202: 0x90, 0x203: 0x90, 0x204: 0x90, 0x205: 0x90, 0x206: 0x90, 0x207: 0x90, + 0x208: 0x90, 0x209: 0x90, 0x20a: 0x90, 0x20b: 0x90, 0x20c: 0x90, 0x20d: 0x90, 0x20e: 0x90, 0x20f: 0x90, + 0x210: 0x90, 0x211: 0x90, 0x212: 0x90, 0x213: 0x90, 0x214: 0x90, 0x215: 0x90, 0x216: 0x90, 0x217: 0x90, + 0x218: 0x90, 0x219: 0x90, 0x21a: 0x90, 0x21b: 0x90, 0x21c: 0x90, 0x21d: 0x90, 0x21e: 0x90, 0x21f: 0x90, + 0x220: 0x90, 0x221: 0x90, 0x222: 0x90, 0x223: 0x90, 0x224: 0x90, 0x225: 0x90, 0x226: 0x90, 0x227: 0x90, + 0x228: 0x90, 0x229: 0x90, 0x22a: 0x90, 0x22b: 0x90, 0x22c: 0x90, 0x22d: 0x90, 0x22e: 0x90, 0x22f: 0x90, + 0x230: 0x90, 0x231: 0x90, 0x232: 0x90, 0x233: 0x90, 0x234: 0x90, 0x235: 0x90, 0x236: 0x91, 0x237: 0x71, + 0x238: 0x90, 0x239: 0x90, 0x23a: 0x90, 0x23b: 0x90, 0x23c: 0x90, 0x23d: 0x90, 0x23e: 0x90, 0x23f: 0x90, + // Block 0x9, offset 0x240 + 0x240: 0x90, 0x241: 0x90, 0x242: 0x90, 0x243: 0x90, 0x244: 0x90, 0x245: 0x90, 0x246: 0x90, 0x247: 0x90, + 0x248: 0x90, 0x249: 0x90, 0x24a: 0x90, 0x24b: 0x90, 0x24c: 0x90, 0x24d: 0x90, 0x24e: 0x90, 0x24f: 0x90, + 0x250: 0x90, 0x251: 0x90, 0x252: 0x90, 0x253: 0x90, 0x254: 0x90, 0x255: 0x90, 0x256: 0x90, 0x257: 0x90, + 0x258: 0x90, 0x259: 0x90, 0x25a: 0x90, 0x25b: 0x90, 0x25c: 0x90, 0x25d: 0x90, 0x25e: 0x90, 0x25f: 0x90, + 0x260: 0x90, 0x261: 0x90, 0x262: 0x90, 0x263: 0x90, 0x264: 0x90, 0x265: 0x90, 0x266: 0x90, 0x267: 0x90, + 0x268: 0x90, 0x269: 0x90, 0x26a: 0x90, 0x26b: 0x90, 0x26c: 0x90, 0x26d: 0x90, 0x26e: 0x90, 0x26f: 0x90, + 0x270: 0x90, 0x271: 0x90, 0x272: 0x90, 0x273: 0x90, 0x274: 0x90, 0x275: 0x90, 0x276: 0x90, 0x277: 0x90, + 0x278: 0x90, 0x279: 0x90, 0x27a: 0x90, 0x27b: 0x90, 0x27c: 0x90, 0x27d: 0x90, 0x27e: 0x90, 0x27f: 0x90, + // Block 0xa, offset 0x280 + 0x280: 0x90, 0x281: 0x90, 0x282: 0x90, 0x283: 0x90, 0x284: 0x90, 0x285: 0x90, 0x286: 0x90, 0x287: 0x90, + 
0x288: 0x90, 0x289: 0x90, 0x28a: 0x90, 0x28b: 0x90, 0x28c: 0x90, 0x28d: 0x90, 0x28e: 0x90, 0x28f: 0x90, + 0x290: 0x90, 0x291: 0x90, 0x292: 0x90, 0x293: 0x90, 0x294: 0x90, 0x295: 0x90, 0x296: 0x90, 0x297: 0x90, + 0x298: 0x90, 0x299: 0x90, 0x29a: 0x90, 0x29b: 0x90, 0x29c: 0x90, 0x29d: 0x90, 0x29e: 0x90, 0x29f: 0x90, + 0x2a0: 0x90, 0x2a1: 0x90, 0x2a2: 0x90, 0x2a3: 0x90, 0x2a4: 0x90, 0x2a5: 0x90, 0x2a6: 0x90, 0x2a7: 0x90, + 0x2a8: 0x90, 0x2a9: 0x90, 0x2aa: 0x90, 0x2ab: 0x90, 0x2ac: 0x90, 0x2ad: 0x90, 0x2ae: 0x90, 0x2af: 0x90, + 0x2b0: 0x90, 0x2b1: 0x90, 0x2b2: 0x90, 0x2b3: 0x90, 0x2b4: 0x90, 0x2b5: 0x90, 0x2b6: 0x90, 0x2b7: 0x90, + 0x2b8: 0x90, 0x2b9: 0x90, 0x2ba: 0x90, 0x2bb: 0x90, 0x2bc: 0x90, 0x2bd: 0x90, 0x2be: 0x90, 0x2bf: 0x92, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x05, 0x2c1: 0x05, 0x2c2: 0x05, 0x2c3: 0x05, 0x2c4: 0x05, 0x2c5: 0x05, 0x2c6: 0x05, 0x2c7: 0x05, + 0x2c8: 0x05, 0x2c9: 0x05, 0x2ca: 0x05, 0x2cb: 0x05, 0x2cc: 0x05, 0x2cd: 0x05, 0x2ce: 0x05, 0x2cf: 0x05, + 0x2d0: 0x05, 0x2d1: 0x05, 0x2d2: 0x93, 0x2d3: 0x94, 0x2d4: 0x05, 0x2d5: 0x05, 0x2d6: 0x05, 0x2d7: 0x05, + 0x2d8: 0x95, 0x2d9: 0x96, 0x2da: 0x97, 0x2db: 0x98, 0x2dc: 0x99, 0x2dd: 0x9a, 0x2de: 0x9b, 0x2df: 0x9c, + 0x2e0: 0x9d, 0x2e1: 0x9e, 0x2e2: 0x05, 0x2e3: 0x9f, 0x2e4: 0xa0, 0x2e5: 0xa1, 0x2e6: 0xa2, 0x2e7: 0xa3, + 0x2e8: 0xa4, 0x2e9: 0xa5, 0x2ea: 0xa6, 0x2eb: 0xa7, 0x2ec: 0xa8, 0x2ed: 0xa9, 0x2ee: 0x05, 0x2ef: 0xaa, + 0x2f0: 0x05, 0x2f1: 0x05, 0x2f2: 0x05, 0x2f3: 0x05, 0x2f4: 0x05, 0x2f5: 0x05, 0x2f6: 0x05, 0x2f7: 0x05, + 0x2f8: 0x05, 0x2f9: 0x05, 0x2fa: 0x05, 0x2fb: 0x05, 0x2fc: 0x05, 0x2fd: 0x05, 0x2fe: 0x05, 0x2ff: 0x05, + // Block 0xc, offset 0x300 + 0x300: 0x05, 0x301: 0x05, 0x302: 0x05, 0x303: 0x05, 0x304: 0x05, 0x305: 0x05, 0x306: 0x05, 0x307: 0x05, + 0x308: 0x05, 0x309: 0x05, 0x30a: 0x05, 0x30b: 0x05, 0x30c: 0x05, 0x30d: 0x05, 0x30e: 0x05, 0x30f: 0x05, + 0x310: 0x05, 0x311: 0x05, 0x312: 0x05, 0x313: 0x05, 0x314: 0x05, 0x315: 0x05, 0x316: 0x05, 0x317: 0x05, + 0x318: 0x05, 0x319: 0x05, 0x31a: 0x05, 0x31b: 0x05, 0x31c: 0x05, 0x31d: 0x05, 0x31e: 0x05, 0x31f: 0x05, + 0x320: 0x05, 0x321: 0x05, 0x322: 0x05, 0x323: 0x05, 0x324: 0x05, 0x325: 0x05, 0x326: 0x05, 0x327: 0x05, + 0x328: 0x05, 0x329: 0x05, 0x32a: 0x05, 0x32b: 0x05, 0x32c: 0x05, 0x32d: 0x05, 0x32e: 0x05, 0x32f: 0x05, + 0x330: 0x05, 0x331: 0x05, 0x332: 0x05, 0x333: 0x05, 0x334: 0x05, 0x335: 0x05, 0x336: 0x05, 0x337: 0x05, + 0x338: 0x05, 0x339: 0x05, 0x33a: 0x05, 0x33b: 0x05, 0x33c: 0x05, 0x33d: 0x05, 0x33e: 0x05, 0x33f: 0x05, + // Block 0xd, offset 0x340 + 0x340: 0x05, 0x341: 0x05, 0x342: 0x05, 0x343: 0x05, 0x344: 0x05, 0x345: 0x05, 0x346: 0x05, 0x347: 0x05, + 0x348: 0x05, 0x349: 0x05, 0x34a: 0x05, 0x34b: 0x05, 0x34c: 0x05, 0x34d: 0x05, 0x34e: 0x05, 0x34f: 0x05, + 0x350: 0x05, 0x351: 0x05, 0x352: 0x05, 0x353: 0x05, 0x354: 0x05, 0x355: 0x05, 0x356: 0x05, 0x357: 0x05, + 0x358: 0x05, 0x359: 0x05, 0x35a: 0x05, 0x35b: 0x05, 0x35c: 0x05, 0x35d: 0x05, 0x35e: 0xab, 0x35f: 0xac, + // Block 0xe, offset 0x380 + 0x380: 0x3e, 0x381: 0x3e, 0x382: 0x3e, 0x383: 0x3e, 0x384: 0x3e, 0x385: 0x3e, 0x386: 0x3e, 0x387: 0x3e, + 0x388: 0x3e, 0x389: 0x3e, 0x38a: 0x3e, 0x38b: 0x3e, 0x38c: 0x3e, 0x38d: 0x3e, 0x38e: 0x3e, 0x38f: 0x3e, + 0x390: 0x3e, 0x391: 0x3e, 0x392: 0x3e, 0x393: 0x3e, 0x394: 0x3e, 0x395: 0x3e, 0x396: 0x3e, 0x397: 0x3e, + 0x398: 0x3e, 0x399: 0x3e, 0x39a: 0x3e, 0x39b: 0x3e, 0x39c: 0x3e, 0x39d: 0x3e, 0x39e: 0x3e, 0x39f: 0x3e, + 0x3a0: 0x3e, 0x3a1: 0x3e, 0x3a2: 0x3e, 0x3a3: 0x3e, 0x3a4: 0x3e, 0x3a5: 0x3e, 0x3a6: 0x3e, 0x3a7: 0x3e, + 0x3a8: 0x3e, 0x3a9: 0x3e, 0x3aa: 0x3e, 0x3ab: 
0x3e, 0x3ac: 0x3e, 0x3ad: 0x3e, 0x3ae: 0x3e, 0x3af: 0x3e, + 0x3b0: 0x3e, 0x3b1: 0x3e, 0x3b2: 0x3e, 0x3b3: 0x3e, 0x3b4: 0x3e, 0x3b5: 0x3e, 0x3b6: 0x3e, 0x3b7: 0x3e, + 0x3b8: 0x3e, 0x3b9: 0x3e, 0x3ba: 0x3e, 0x3bb: 0x3e, 0x3bc: 0x3e, 0x3bd: 0x3e, 0x3be: 0x3e, 0x3bf: 0x3e, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x3e, 0x3c1: 0x3e, 0x3c2: 0x3e, 0x3c3: 0x3e, 0x3c4: 0x3e, 0x3c5: 0x3e, 0x3c6: 0x3e, 0x3c7: 0x3e, + 0x3c8: 0x3e, 0x3c9: 0x3e, 0x3ca: 0x3e, 0x3cb: 0x3e, 0x3cc: 0x3e, 0x3cd: 0x3e, 0x3ce: 0x3e, 0x3cf: 0x3e, + 0x3d0: 0x3e, 0x3d1: 0x3e, 0x3d2: 0x3e, 0x3d3: 0x3e, 0x3d4: 0x3e, 0x3d5: 0x3e, 0x3d6: 0x3e, 0x3d7: 0x3e, + 0x3d8: 0x3e, 0x3d9: 0x3e, 0x3da: 0x3e, 0x3db: 0x3e, 0x3dc: 0x3e, 0x3dd: 0x3e, 0x3de: 0x3e, 0x3df: 0x3e, + 0x3e0: 0x3e, 0x3e1: 0x3e, 0x3e2: 0x3e, 0x3e3: 0x3e, 0x3e4: 0x83, 0x3e5: 0x83, 0x3e6: 0x83, 0x3e7: 0x83, + 0x3e8: 0xad, 0x3e9: 0xae, 0x3ea: 0x83, 0x3eb: 0xaf, 0x3ec: 0xb0, 0x3ed: 0xb1, 0x3ee: 0x71, 0x3ef: 0xb2, + 0x3f0: 0x71, 0x3f1: 0x71, 0x3f2: 0x71, 0x3f3: 0x71, 0x3f4: 0x71, 0x3f5: 0xb3, 0x3f6: 0xb4, 0x3f7: 0xb5, + 0x3f8: 0xb6, 0x3f9: 0xb7, 0x3fa: 0x71, 0x3fb: 0xb8, 0x3fc: 0xb9, 0x3fd: 0xba, 0x3fe: 0xbb, 0x3ff: 0xbc, + // Block 0x10, offset 0x400 + 0x400: 0xbd, 0x401: 0xbe, 0x402: 0x05, 0x403: 0xbf, 0x404: 0xc0, 0x405: 0xc1, 0x406: 0xc2, 0x407: 0xc3, + 0x40a: 0xc4, 0x40b: 0xc5, 0x40c: 0xc6, 0x40d: 0xc7, 0x40e: 0xc8, 0x40f: 0xc9, + 0x410: 0x05, 0x411: 0x05, 0x412: 0xca, 0x413: 0xcb, 0x414: 0xcc, 0x415: 0xcd, + 0x418: 0x05, 0x419: 0x05, 0x41a: 0x05, 0x41b: 0x05, 0x41c: 0xce, 0x41d: 0xcf, + 0x420: 0xd0, 0x421: 0xd1, 0x422: 0xd2, 0x423: 0xd3, 0x424: 0xd4, 0x426: 0xd5, 0x427: 0xb4, + 0x428: 0xd6, 0x429: 0xd7, 0x42a: 0xd8, 0x42b: 0xd9, 0x42c: 0xda, 0x42d: 0xdb, 0x42e: 0xdc, + 0x430: 0x05, 0x431: 0x5f, 0x432: 0xdd, 0x433: 0xde, + 0x439: 0xdf, + // Block 0x11, offset 0x440 + 0x440: 0xe0, 0x441: 0xe1, 0x442: 0xe2, 0x443: 0xe3, 0x444: 0xe4, 0x445: 0xe5, 0x446: 0xe6, 0x447: 0xe7, + 0x448: 0xe8, 0x44a: 0xe9, 0x44b: 0xea, 0x44c: 0xeb, 0x44d: 0xec, + 0x450: 0xed, 0x451: 0xee, 0x452: 0xef, 0x453: 0xf0, 0x456: 0xf1, 0x457: 0xf2, + 0x458: 0xf3, 0x459: 0xf4, 0x45a: 0xf5, 0x45b: 0xf6, 0x45c: 0xf7, + 0x462: 0xf8, 0x463: 0xf9, + 0x46b: 0xfa, + 0x470: 0xfb, 0x471: 0xfc, 0x472: 0xfd, + // Block 0x12, offset 0x480 + 0x480: 0x05, 0x481: 0x05, 0x482: 0x05, 0x483: 0x05, 0x484: 0x05, 0x485: 0x05, 0x486: 0x05, 0x487: 0x05, + 0x488: 0x05, 0x489: 0x05, 0x48a: 0x05, 0x48b: 0x05, 0x48c: 0x05, 0x48d: 0x05, 0x48e: 0xfe, + 0x490: 0x71, 0x491: 0xff, 0x492: 0x05, 0x493: 0x05, 0x494: 0x05, 0x495: 0x100, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x05, 0x4c1: 0x05, 0x4c2: 0x05, 0x4c3: 0x05, 0x4c4: 0x05, 0x4c5: 0x05, 0x4c6: 0x05, 0x4c7: 0x05, + 0x4c8: 0x05, 0x4c9: 0x05, 0x4ca: 0x05, 0x4cb: 0x05, 0x4cc: 0x05, 0x4cd: 0x05, 0x4ce: 0x05, 0x4cf: 0x05, + 0x4d0: 0x101, + // Block 0x14, offset 0x500 + 0x510: 0x05, 0x511: 0x05, 0x512: 0x05, 0x513: 0x05, 0x514: 0x05, 0x515: 0x05, 0x516: 0x05, 0x517: 0x05, + 0x518: 0x05, 0x519: 0x102, + // Block 0x15, offset 0x540 + 0x560: 0x05, 0x561: 0x05, 0x562: 0x05, 0x563: 0x05, 0x564: 0x05, 0x565: 0x05, 0x566: 0x05, 0x567: 0x05, + 0x568: 0xfa, 0x569: 0x103, 0x56b: 0x104, 0x56c: 0x105, 0x56d: 0x106, 0x56e: 0x107, + 0x57c: 0x05, 0x57d: 0x108, 0x57e: 0x109, 0x57f: 0x10a, + // Block 0x16, offset 0x580 + 0x580: 0x05, 0x581: 0x05, 0x582: 0x05, 0x583: 0x05, 0x584: 0x05, 0x585: 0x05, 0x586: 0x05, 0x587: 0x05, + 0x588: 0x05, 0x589: 0x05, 0x58a: 0x05, 0x58b: 0x05, 0x58c: 0x05, 0x58d: 0x05, 0x58e: 0x05, 0x58f: 0x05, + 0x590: 0x05, 0x591: 0x05, 0x592: 0x05, 0x593: 0x05, 0x594: 0x05, 0x595: 0x05, 0x596: 
0x05, 0x597: 0x05, + 0x598: 0x05, 0x599: 0x05, 0x59a: 0x05, 0x59b: 0x05, 0x59c: 0x05, 0x59d: 0x05, 0x59e: 0x05, 0x59f: 0x10b, + 0x5a0: 0x05, 0x5a1: 0x05, 0x5a2: 0x05, 0x5a3: 0x05, 0x5a4: 0x05, 0x5a5: 0x05, 0x5a6: 0x05, 0x5a7: 0x05, + 0x5a8: 0x05, 0x5a9: 0x05, 0x5aa: 0x05, 0x5ab: 0xdd, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x10c, + 0x5f0: 0x05, 0x5f1: 0x10d, 0x5f2: 0x10e, + // Block 0x18, offset 0x600 + 0x600: 0x71, 0x601: 0x71, 0x602: 0x71, 0x603: 0x10f, 0x604: 0x110, 0x605: 0x111, 0x606: 0x112, 0x607: 0x113, + 0x608: 0xc1, 0x609: 0x114, 0x60c: 0x71, 0x60d: 0x115, + 0x610: 0x71, 0x611: 0x116, 0x612: 0x117, 0x613: 0x118, 0x614: 0x119, 0x615: 0x11a, 0x616: 0x71, 0x617: 0x71, + 0x618: 0x71, 0x619: 0x71, 0x61a: 0x11b, 0x61b: 0x71, 0x61c: 0x71, 0x61d: 0x71, 0x61e: 0x71, 0x61f: 0x11c, + 0x620: 0x71, 0x621: 0x71, 0x622: 0x71, 0x623: 0x71, 0x624: 0x71, 0x625: 0x71, 0x626: 0x71, 0x627: 0x71, + 0x628: 0x11d, 0x629: 0x11e, 0x62a: 0x11f, + // Block 0x19, offset 0x640 + 0x640: 0x120, + 0x660: 0x05, 0x661: 0x05, 0x662: 0x05, 0x663: 0x121, 0x664: 0x122, 0x665: 0x123, + 0x678: 0x124, 0x679: 0x125, 0x67a: 0x126, 0x67b: 0x127, + // Block 0x1a, offset 0x680 + 0x680: 0x128, 0x681: 0x71, 0x682: 0x129, 0x683: 0x12a, 0x684: 0x12b, 0x685: 0x128, 0x686: 0x12c, 0x687: 0x12d, + 0x688: 0x12e, 0x689: 0x12f, 0x68c: 0x71, 0x68d: 0x71, 0x68e: 0x71, 0x68f: 0x71, + 0x690: 0x71, 0x691: 0x71, 0x692: 0x71, 0x693: 0x71, 0x694: 0x71, 0x695: 0x71, 0x696: 0x71, 0x697: 0x71, + 0x698: 0x71, 0x699: 0x71, 0x69a: 0x71, 0x69b: 0x130, 0x69c: 0x71, 0x69d: 0x131, 0x69e: 0x71, 0x69f: 0x132, + 0x6a0: 0x133, 0x6a1: 0x134, 0x6a2: 0x135, 0x6a4: 0x136, 0x6a5: 0x137, 0x6a6: 0x138, 0x6a7: 0x139, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x90, 0x6c1: 0x90, 0x6c2: 0x90, 0x6c3: 0x90, 0x6c4: 0x90, 0x6c5: 0x90, 0x6c6: 0x90, 0x6c7: 0x90, + 0x6c8: 0x90, 0x6c9: 0x90, 0x6ca: 0x90, 0x6cb: 0x90, 0x6cc: 0x90, 0x6cd: 0x90, 0x6ce: 0x90, 0x6cf: 0x90, + 0x6d0: 0x90, 0x6d1: 0x90, 0x6d2: 0x90, 0x6d3: 0x90, 0x6d4: 0x90, 0x6d5: 0x90, 0x6d6: 0x90, 0x6d7: 0x90, + 0x6d8: 0x90, 0x6d9: 0x90, 0x6da: 0x90, 0x6db: 0x13a, 0x6dc: 0x90, 0x6dd: 0x90, 0x6de: 0x90, 0x6df: 0x90, + 0x6e0: 0x90, 0x6e1: 0x90, 0x6e2: 0x90, 0x6e3: 0x90, 0x6e4: 0x90, 0x6e5: 0x90, 0x6e6: 0x90, 0x6e7: 0x90, + 0x6e8: 0x90, 0x6e9: 0x90, 0x6ea: 0x90, 0x6eb: 0x90, 0x6ec: 0x90, 0x6ed: 0x90, 0x6ee: 0x90, 0x6ef: 0x90, + 0x6f0: 0x90, 0x6f1: 0x90, 0x6f2: 0x90, 0x6f3: 0x90, 0x6f4: 0x90, 0x6f5: 0x90, 0x6f6: 0x90, 0x6f7: 0x90, + 0x6f8: 0x90, 0x6f9: 0x90, 0x6fa: 0x90, 0x6fb: 0x90, 0x6fc: 0x90, 0x6fd: 0x90, 0x6fe: 0x90, 0x6ff: 0x90, + // Block 0x1c, offset 0x700 + 0x700: 0x90, 0x701: 0x90, 0x702: 0x90, 0x703: 0x90, 0x704: 0x90, 0x705: 0x90, 0x706: 0x90, 0x707: 0x90, + 0x708: 0x90, 0x709: 0x90, 0x70a: 0x90, 0x70b: 0x90, 0x70c: 0x90, 0x70d: 0x90, 0x70e: 0x90, 0x70f: 0x90, + 0x710: 0x90, 0x711: 0x90, 0x712: 0x90, 0x713: 0x90, 0x714: 0x90, 0x715: 0x90, 0x716: 0x90, 0x717: 0x90, + 0x718: 0x90, 0x719: 0x90, 0x71a: 0x90, 0x71b: 0x90, 0x71c: 0x13b, 0x71d: 0x90, 0x71e: 0x90, 0x71f: 0x90, + 0x720: 0x13c, 0x721: 0x90, 0x722: 0x90, 0x723: 0x90, 0x724: 0x90, 0x725: 0x90, 0x726: 0x90, 0x727: 0x90, + 0x728: 0x90, 0x729: 0x90, 0x72a: 0x90, 0x72b: 0x90, 0x72c: 0x90, 0x72d: 0x90, 0x72e: 0x90, 0x72f: 0x90, + 0x730: 0x90, 0x731: 0x90, 0x732: 0x90, 0x733: 0x90, 0x734: 0x90, 0x735: 0x90, 0x736: 0x90, 0x737: 0x90, + 0x738: 0x90, 0x739: 0x90, 0x73a: 0x90, 0x73b: 0x90, 0x73c: 0x90, 0x73d: 0x90, 0x73e: 0x90, 0x73f: 0x90, + // Block 0x1d, offset 0x740 + 0x740: 0x90, 0x741: 0x90, 0x742: 0x90, 0x743: 0x90, 0x744: 0x90, 0x745: 0x90, 0x746: 0x90, 
0x747: 0x90, + 0x748: 0x90, 0x749: 0x90, 0x74a: 0x90, 0x74b: 0x90, 0x74c: 0x90, 0x74d: 0x90, 0x74e: 0x90, 0x74f: 0x90, + 0x750: 0x90, 0x751: 0x90, 0x752: 0x90, 0x753: 0x90, 0x754: 0x90, 0x755: 0x90, 0x756: 0x90, 0x757: 0x90, + 0x758: 0x90, 0x759: 0x90, 0x75a: 0x90, 0x75b: 0x90, 0x75c: 0x90, 0x75d: 0x90, 0x75e: 0x90, 0x75f: 0x90, + 0x760: 0x90, 0x761: 0x90, 0x762: 0x90, 0x763: 0x90, 0x764: 0x90, 0x765: 0x90, 0x766: 0x90, 0x767: 0x90, + 0x768: 0x90, 0x769: 0x90, 0x76a: 0x90, 0x76b: 0x90, 0x76c: 0x90, 0x76d: 0x90, 0x76e: 0x90, 0x76f: 0x90, + 0x770: 0x90, 0x771: 0x90, 0x772: 0x90, 0x773: 0x90, 0x774: 0x90, 0x775: 0x90, 0x776: 0x90, 0x777: 0x90, + 0x778: 0x90, 0x779: 0x90, 0x77a: 0x13d, + // Block 0x1e, offset 0x780 + 0x7a0: 0x83, 0x7a1: 0x83, 0x7a2: 0x83, 0x7a3: 0x83, 0x7a4: 0x83, 0x7a5: 0x83, 0x7a6: 0x83, 0x7a7: 0x83, + 0x7a8: 0x13e, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0e, 0x7d1: 0x0f, 0x7d2: 0x10, 0x7d3: 0x11, 0x7d4: 0x12, 0x7d6: 0x13, 0x7d7: 0x0a, + 0x7d8: 0x14, 0x7db: 0x15, 0x7dd: 0x16, 0x7de: 0x17, 0x7df: 0x18, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x19, 0x7eb: 0x1a, 0x7ec: 0x1b, 0x7ef: 0x1c, + // Block 0x20, offset 0x800 + 0x800: 0x13f, 0x801: 0x3e, 0x804: 0x3e, 0x805: 0x3e, 0x806: 0x3e, 0x807: 0x140, + // Block 0x21, offset 0x840 + 0x840: 0x3e, 0x841: 0x3e, 0x842: 0x3e, 0x843: 0x3e, 0x844: 0x3e, 0x845: 0x3e, 0x846: 0x3e, 0x847: 0x3e, + 0x848: 0x3e, 0x849: 0x3e, 0x84a: 0x3e, 0x84b: 0x3e, 0x84c: 0x3e, 0x84d: 0x3e, 0x84e: 0x3e, 0x84f: 0x3e, + 0x850: 0x3e, 0x851: 0x3e, 0x852: 0x3e, 0x853: 0x3e, 0x854: 0x3e, 0x855: 0x3e, 0x856: 0x3e, 0x857: 0x3e, + 0x858: 0x3e, 0x859: 0x3e, 0x85a: 0x3e, 0x85b: 0x3e, 0x85c: 0x3e, 0x85d: 0x3e, 0x85e: 0x3e, 0x85f: 0x3e, + 0x860: 0x3e, 0x861: 0x3e, 0x862: 0x3e, 0x863: 0x3e, 0x864: 0x3e, 0x865: 0x3e, 0x866: 0x3e, 0x867: 0x3e, + 0x868: 0x3e, 0x869: 0x3e, 0x86a: 0x3e, 0x86b: 0x3e, 0x86c: 0x3e, 0x86d: 0x3e, 0x86e: 0x3e, 0x86f: 0x3e, + 0x870: 0x3e, 0x871: 0x3e, 0x872: 0x3e, 0x873: 0x3e, 0x874: 0x3e, 0x875: 0x3e, 0x876: 0x3e, 0x877: 0x3e, + 0x878: 0x3e, 0x879: 0x3e, 0x87a: 0x3e, 0x87b: 0x3e, 0x87c: 0x3e, 0x87d: 0x3e, 0x87e: 0x3e, 0x87f: 0x141, + // Block 0x22, offset 0x880 + 0x8a0: 0x1e, + 0x8b0: 0x0c, 0x8b1: 0x0c, 0x8b2: 0x0c, 0x8b3: 0x0c, 0x8b4: 0x0c, 0x8b5: 0x0c, 0x8b6: 0x0c, 0x8b7: 0x0c, + 0x8b8: 0x0c, 0x8b9: 0x0c, 0x8ba: 0x0c, 0x8bb: 0x0c, 0x8bc: 0x0c, 0x8bd: 0x0c, 0x8be: 0x0c, 0x8bf: 0x1f, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0c, 0x8c1: 0x0c, 0x8c2: 0x0c, 0x8c3: 0x0c, 0x8c4: 0x0c, 0x8c5: 0x0c, 0x8c6: 0x0c, 0x8c7: 0x0c, + 0x8c8: 0x0c, 0x8c9: 0x0c, 0x8ca: 0x0c, 0x8cb: 0x0c, 0x8cc: 0x0c, 0x8cd: 0x0c, 0x8ce: 0x0c, 0x8cf: 0x1f, +} + +// Total table size 25344 bytes (24KiB); checksum: 811C9DC5 diff --git a/vendor/golang.org/x/text/secure/precis/transformer.go b/vendor/golang.org/x/text/secure/precis/transformer.go new file mode 100644 index 000000000..97ce5e757 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/transformer.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package precis + +import "golang.org/x/text/transform" + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + t transform.Transformer +} + +// Reset implements the transform.Transformer interface. +func (t Transformer) Reset() { t.t.Reset() } + +// Transform implements the transform.Transformer interface. 
+func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +// Bytes returns a new byte slice with the result of applying t to b. +func (t Transformer) Bytes(b []byte) []byte { + b, _, _ = transform.Bytes(t, b) + return b +} + +// String returns a string with the result of applying t to s. +func (t Transformer) String(s string) string { + s, _, _ = transform.String(t, s) + return s +} diff --git a/vendor/golang.org/x/text/secure/precis/trieval.go b/vendor/golang.org/x/text/secure/precis/trieval.go new file mode 100644 index 000000000..4833f9622 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/trieval.go @@ -0,0 +1,64 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package precis + +// entry is the entry of a trie table +// 7..6 property (unassigned, disallowed, maybe, valid) +// 5..0 category +type entry uint8 + +const ( + propShift = 6 + propMask = 0xc0 + catMask = 0x3f +) + +func (e entry) property() property { return property(e & propMask) } +func (e entry) category() category { return category(e & catMask) } + +type property uint8 + +// The order of these constants matter. A Profile may consider runes to be +// allowed either from pValid or idDisOrFreePVal. +const ( + unassigned property = iota << propShift + disallowed + idDisOrFreePVal // disallowed for Identifier, pValid for FreeForm + pValid +) + +// compute permutations of all properties and specialCategories. +type category uint8 + +const ( + other category = iota + + // Special rune types + joiningL + joiningD + joiningT + joiningR + viramaModifier + viramaJoinT // Virama + JoiningT + latinSmallL // U+006c + greek + greekJoinT // Greek + JoiningT + hebrew + hebrewJoinT // Hebrew + JoiningT + japanese // hirigana, katakana, han + + // Special rune types associated with contextual rules defined in + // https://tools.ietf.org/html/rfc5892#appendix-A. + // ContextO + zeroWidthNonJoiner // rule 1 + zeroWidthJoiner // rule 2 + // ContextJ + middleDot // rule 3 + greekLowerNumeralSign // rule 4 + hebrewPreceding // rule 5 and 6 + katakanaMiddleDot // rule 7 + arabicIndicDigit // rule 8 + extendedArabicIndicDigit // rule 9 + + numCategories +) diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go new file mode 100644 index 000000000..ab4fee542 --- /dev/null +++ b/vendor/golang.org/x/text/width/kind_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Kind"; DO NOT EDIT + +package width + +import "fmt" + +const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" + +var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} + +func (i Kind) String() string { + if i < 0 || i >= Kind(len(_Kind_index)-1) { + return fmt.Sprintf("Kind(%d)", i) + } + return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] +} diff --git a/vendor/golang.org/x/text/width/tables.go b/vendor/golang.org/x/text/width/tables.go new file mode 100644 index 000000000..e21f0b838 --- /dev/null +++ b/vendor/golang.org/x/text/width/tables.go @@ -0,0 +1,1284 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. 
The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 99 blocks, 6336 entries, 12672 bytes +// The third block is the zero block. +var widthValues = [6336]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 
0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 
0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 
0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 
0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 
0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 
0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 
0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 
0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 
0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 
0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 
0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12c4: 0x4000, + // Block 0x4c, offset 0x1300 + 0x130f: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000, + 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000, + 0x1350: 0x2000, 0x1351: 0x2000, + 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000, + 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000, + 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000, + 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000, + 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000, + 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000, + 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000, + 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 
0x2000, + // Block 0x4e, offset 0x1380 + 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000, + 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000, + 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000, + 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000, + 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000, + 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000, + 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000, + 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000, + 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000, + 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000, + 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000, + 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000, + 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000, + 0x1410: 0x4000, 0x1411: 0x4000, + 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000, + 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000, + 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000, + 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000, + 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000, + 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000, + 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000, + // Block 0x51, offset 0x1440 + 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000, + 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000, + 0x1450: 0x4000, 0x1451: 0x4000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000, + 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000, + 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, + 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 
0x14bb: 0x4000, + 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000, + 0x14cc: 0x4000, 0x14cd: 0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, + 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, + 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, + 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, + 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, + 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, + 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, + 0x1520: 0x4000, 0x1521: 0x4000, 0x1522: 0x4000, 0x1523: 0x4000, + 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000, + 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, + 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, + 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1574: 0x4000, + 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, + 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, + 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 
0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, + 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000, + 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000, + 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, + 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, + // Block 0x59, offset 0x1640 + 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, + 0x167a: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1695: 0x4000, 0x1696: 0x4000, + 0x16a4: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16fb: 0x4000, + 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, + 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, + 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, + // Block 0x5d, offset 0x1740 + 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000, + 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, + 0x1752: 0x4000, + 
0x176b: 0x4000, 0x176c: 0x4000, + 0x1774: 0x4000, 0x1775: 0x4000, + 0x1776: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1790: 0x4000, 0x1791: 0x4000, + 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000, + 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 0x4000, + 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000, + 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000, + 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000, + 0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000, + 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, + 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000, + 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000, + 0x17de: 0x4000, + // Block 0x60, offset 0x1800 + 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, + 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, + 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000, + 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000, + 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000, + 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000, + 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000, + 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000, + 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000, + 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000, + 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000, + 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000, + 0x18bc: 0x2000, 0x18bd: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x48, + // Block 0x10, offset 0x400 + 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d, + 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 
0x40f: 0x53, + 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59, + 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b, + 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. 
+var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 
0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14680 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/transform.go b/vendor/golang.org/x/text/width/transform.go new file mode 100644 index 000000000..0049f700a --- /dev/null +++ b/vendor/golang.org/x/text/width/transform.go @@ -0,0 +1,239 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package width + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +type foldTransform struct { + transform.NopResetter +} + +func (foldTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + if src[n] < utf8.RuneSelf { + // ASCII fast path. + for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { + } + continue + } + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if elem(v)&tagNeedsFold != 0 { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + if src[nSrc] < utf8.RuneSelf { + // ASCII fast path. + start, end := nSrc, len(src) + if d := len(dst) - nDst; d < end-start { + end = nSrc + d + } + for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { + } + n := copy(dst[nDst:], src[start:nSrc]) + if nDst += n; nDst == len(dst) { + nSrc = start + n + if nSrc == len(src) { + return nDst, nSrc, nil + } + if src[nSrc] < utf8.RuneSelf { + return nDst, nSrc, transform.ErrShortDst + } + } + continue + } + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if elem(v)&tagNeedsFold == 0 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} + +type narrowTransform struct { + transform.NopResetter +} + +func (narrowTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + if src[n] < utf8.RuneSelf { + // ASCII fast path. 
+ for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { + } + continue + } + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { + } else { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + if src[nSrc] < utf8.RuneSelf { + // ASCII fast path. + start, end := nSrc, len(src) + if d := len(dst) - nDst; d < end-start { + end = nSrc + d + } + for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { + } + n := copy(dst[nDst:], src[start:nSrc]) + if nDst += n; nDst == len(dst) { + nSrc = start + n + if nSrc == len(src) { + return nDst, nSrc, nil + } + if src[nSrc] < utf8.RuneSelf { + return nDst, nSrc, transform.ErrShortDst + } + } + continue + } + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} + +type wideTransform struct { + transform.NopResetter +} + +func (wideTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // TODO: Consider ASCII fast path. Special-casing ASCII handling can + // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably + // not enough to warrant the extra code and complexity. + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { + } else { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // TODO: Consider ASCII fast path. Special-casing ASCII handling can + // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably + // not enough to warrant the extra code and complexity. 
+ v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} diff --git a/vendor/golang.org/x/text/width/trieval.go b/vendor/golang.org/x/text/width/trieval.go new file mode 100644 index 000000000..ca8e45fd1 --- /dev/null +++ b/vendor/golang.org/x/text/width/trieval.go @@ -0,0 +1,30 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package width + +// elem is an entry of the width trie. The high byte is used to encode the type +// of the rune. The low byte is used to store the index to a mapping entry in +// the inverseData array. +type elem uint16 + +const ( + tagNeutral elem = iota << typeShift + tagAmbiguous + tagWide + tagNarrow + tagFullwidth + tagHalfwidth +) + +const ( + numTypeBits = 3 + typeShift = 16 - numTypeBits + + // tagNeedsFold is true for all fullwidth and halfwidth runes except for + // the Won sign U+20A9. + tagNeedsFold = 0x1000 + + // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide + // variant. + wonSign rune = 0x20A9 +) diff --git a/vendor/golang.org/x/text/width/width.go b/vendor/golang.org/x/text/width/width.go new file mode 100644 index 000000000..f1639ca68 --- /dev/null +++ b/vendor/golang.org/x/text/width/width.go @@ -0,0 +1,206 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate stringer -type=Kind +//go:generate go run gen.go gen_common.go gen_trieval.go + +// Package width provides functionality for handling different widths in text. +// +// Wide characters behave like ideographs; they tend to allow line breaks after +// each character and remain upright in vertical text layout. Narrow characters +// are kept together in words or runs that are rotated sideways in vertical text +// layout. +// +// For more information, see http://unicode.org/reports/tr11/. +package width // import "golang.org/x/text/width" + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// TODO +// 1) Reduce table size by compressing blocks. +// 2) API proposition for computing display length +// (approximation, fixed pitch only). +// 3) Implement display length. + +// Kind indicates the type of width property as defined in http://unicode.org/reports/tr11/. +type Kind int + +const ( + // Neutral characters do not occur in legacy East Asian character sets. + Neutral Kind = iota + + // EastAsianAmbiguous characters that can be sometimes wide and sometimes + // narrow and require additional information not contained in the character + // code to further resolve their width. + EastAsianAmbiguous + + // EastAsianWide characters are wide in its usual form. They occur only in + // the context of East Asian typography. These runes may have explicit + // halfwidth counterparts. + EastAsianWide + + // EastAsianNarrow characters are narrow in its usual form. 
They often have + // fullwidth counterparts. + EastAsianNarrow + + // Note: there exist Narrow runes that do not have fullwidth or wide + // counterparts, despite what the definition says (e.g. U+27E6). + + // EastAsianFullwidth characters have a compatibility decompositions of type + // wide that map to a narrow counterpart. + EastAsianFullwidth + + // EastAsianHalfwidth characters have a compatibility decomposition of type + // narrow that map to a wide or ambiguous counterpart, plus U+20A9 ₩ WON + // SIGN. + EastAsianHalfwidth + + // Note: there exist runes that have a halfwidth counterparts but that are + // classified as Ambiguous, rather than wide (e.g. U+2190). +) + +// TODO: the generated tries need to return size 1 for invalid runes for the +// width to be computed correctly (each byte should render width 1) + +var trie = newWidthTrie(0) + +// Lookup reports the Properties of the first rune in b and the number of bytes +// of its UTF-8 encoding. +func Lookup(b []byte) (p Properties, size int) { + v, sz := trie.lookup(b) + return Properties{elem(v), b[sz-1]}, sz +} + +// LookupString reports the Properties of the first rune in s and the number of +// bytes of its UTF-8 encoding. +func LookupString(s string) (p Properties, size int) { + v, sz := trie.lookupString(s) + return Properties{elem(v), s[sz-1]}, sz +} + +// LookupRune reports the Properties of rune r. +func LookupRune(r rune) Properties { + var buf [4]byte + n := utf8.EncodeRune(buf[:], r) + v, _ := trie.lookup(buf[:n]) + last := byte(r) + if r >= utf8.RuneSelf { + last = 0x80 + byte(r&0x3f) + } + return Properties{elem(v), last} +} + +// Properties provides access to width properties of a rune. +type Properties struct { + elem elem + last byte +} + +func (e elem) kind() Kind { + return Kind(e >> typeShift) +} + +// Kind returns the Kind of a rune as defined in Unicode TR #11. +// See http://unicode.org/reports/tr11/ for more details. +func (p Properties) Kind() Kind { + return p.elem.kind() +} + +// Folded returns the folded variant of a rune or 0 if the rune is canonical. +func (p Properties) Folded() rune { + if p.elem&tagNeedsFold != 0 { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// Narrow returns the narrow variant of a rune or 0 if the rune is already +// narrow or doesn't have a narrow variant. +func (p Properties) Narrow() rune { + if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// Wide returns the wide variant of a rune or 0 if the rune is already +// wide or doesn't have a wide variant. +func (p Properties) Wide() rune { + if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// TODO for Properties: +// - Add Fullwidth/Halfwidth or Inverted methods for computing variants +// mapping. +// - Add width information (including information on non-spacing runes). + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + t transform.SpanningTransformer +} + +// Reset implements the transform.Transformer interface. 
+func (t Transformer) Reset() { t.t.Reset() } + +// Transform implements the transform.Transformer interface. +func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +// Span implements the transform.SpanningTransformer interface. +func (t Transformer) Span(src []byte, atEOF bool) (n int, err error) { + return t.t.Span(src, atEOF) +} + +// Bytes returns a new byte slice with the result of applying t to b. +func (t Transformer) Bytes(b []byte) []byte { + b, _, _ = transform.Bytes(t, b) + return b +} + +// String returns a string with the result of applying t to s. +func (t Transformer) String(s string) string { + s, _, _ = transform.String(t, s) + return s +} + +var ( + // Fold is a transform that maps all runes to their canonical width. + // + // Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm + // provide a more generic folding mechanism. + Fold Transformer = Transformer{foldTransform{}} + + // Widen is a transform that maps runes to their wide variant, if + // available. + Widen Transformer = Transformer{wideTransform{}} + + // Narrow is a transform that maps runes to their narrow variant, if + // available. + Narrow Transformer = Transformer{narrowTransform{}} +) + +// TODO: Consider the following options: +// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some +// generalized variant of this. +// - Consider a wide Won character to be the default width (or some generalized +// variant of this). +// - Filter the set of characters that gets converted (the preferred approach is +// to allow applying filters to transforms). diff --git a/vendor/gopkg.in/inf.v0/LICENSE b/vendor/gopkg.in/inf.v0/LICENSE new file mode 100644 index 000000000..87a5cede3 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
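The vendored golang.org/x/text/width package added above is consumed through a small API: Lookup and LookupRune return per-rune Properties, while the package-level Fold, Widen and Narrow transformers rewrite whole strings. The sketch below is an editor's illustration only, not part of this patch or of the upstream files; it simply exercises the API documented in width.go above, assuming the package is importable as vendored here (the file name is made up for the example).

// width_example.go — illustrative only, not part of the vendored sources.
package main

import (
	"fmt"

	"golang.org/x/text/width"
)

func main() {
	// Fold maps every rune to its canonical width: fullwidth Latin letters
	// become ASCII, halfwidth katakana become their fullwidth forms.
	fmt.Println(width.Fold.String("ＡＢＣ ｶﾀｶﾅ")) // ABC カタカナ

	// Widen and Narrow force a specific variant where one exists.
	fmt.Println(width.Narrow.String("ＡＢＣ")) // ABC
	fmt.Println(width.Widen.String("ABC"))    // ＡＢＣ

	// LookupRune reports the width class and variants of a single rune.
	p := width.LookupRune('Ａ') // U+FF21 FULLWIDTH LATIN CAPITAL LETTER A
	fmt.Println(p.Kind() == width.EastAsianFullwidth) // true
	fmt.Println(string(p.Narrow()))                   // A
}

Fold is the usual choice when normalizing mixed-width input before comparison; Widen and Narrow instead pick one specific variant and pass runes through unchanged when no such variant exists.
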
diff --git a/vendor/gopkg.in/inf.v0/dec.go b/vendor/gopkg.in/inf.v0/dec.go new file mode 100644 index 000000000..3b4afedf1 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/dec.go @@ -0,0 +1,615 @@ +// Package inf (type inf.Dec) implements "infinite-precision" decimal +// arithmetic. +// "Infinite precision" describes two characteristics: practically unlimited +// precision for decimal number representation and no support for calculating +// with any specific fixed precision. +// (Although there is no practical limit on precision, inf.Dec can only +// represent finite decimals.) +// +// This package is currently in experimental stage and the API may change. +// +// This package does NOT support: +// - rounding to specific precisions (as opposed to specific decimal positions) +// - the notion of context (each rounding must be explicit) +// - NaN and Inf values, and distinguishing between positive and negative zero +// - conversions to and from float32/64 types +// +// Features considered for possible addition: +// + formatting options +// + Exp method +// + combined operations such as AddRound/MulAdd etc +// + exchanging data in decimal32/64/128 formats +// +package inf // import "gopkg.in/inf.v0" + +// TODO: +// - avoid excessive deep copying (quo and rounders) + +import ( + "fmt" + "io" + "math/big" + "strings" +) + +// A Dec represents a signed arbitrary-precision decimal. +// It is a combination of a sign, an arbitrary-precision integer coefficient +// value, and a signed fixed-precision exponent value. +// The sign and the coefficient value are handled together as a signed value +// and referred to as the unscaled value. +// (Positive and negative zero values are not distinguished.) +// Since the exponent is most commonly non-positive, it is handled in negated +// form and referred to as scale. +// +// The mathematical value of a Dec equals: +// +// unscaled * 10**(-scale) +// +// Note that different Dec representations may have equal mathematical values. +// +// unscaled scale String() +// ------------------------- +// 0 0 "0" +// 0 2 "0.00" +// 0 -2 "0" +// 1 0 "1" +// 100 2 "1.00" +// 10 0 "10" +// 1 -1 "10" +// +// The zero value for a Dec represents the value 0 with scale 0. +// +// Operations are typically performed through the *Dec type. +// The semantics of the assignment operation "=" for "bare" Dec values is +// undefined and should not be relied on. +// +// Methods are typically of the form: +// +// func (z *Dec) Op(x, y *Dec) *Dec +// +// and implement operations z = x Op y with the result as receiver; if it +// is one of the operands it may be overwritten (and its memory reused). +// To enable chaining of operations, the result is also returned. Methods +// returning a result other than *Dec take one of the operands as the receiver. +// +// A "bare" Quo method (quotient / division operation) is not provided, as the +// result is not always a finite decimal and thus in general cannot be +// represented as a Dec. +// Instead, in the common case when rounding is (potentially) necessary, +// QuoRound should be used with a Scale and a Rounder. +// QuoExact or QuoRound with RoundExact can be used in the special cases when it +// is known that the result is always a finite decimal. +// +type Dec struct { + unscaled big.Int + scale Scale +} + +// Scale represents the type used for the scale of a Dec. +type Scale int32 + +const scaleSize = 4 // bytes in a Scale value + +// Scaler represents a method for obtaining the scale to use for the result of +// an operation on x and y. 
+type scaler interface { + Scale(x *Dec, y *Dec) Scale +} + +var bigInt = [...]*big.Int{ + big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4), + big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9), + big.NewInt(10), +} + +var exp10cache [64]big.Int = func() [64]big.Int { + e10, e10i := [64]big.Int{}, bigInt[1] + for i, _ := range e10 { + e10[i].Set(e10i) + e10i = new(big.Int).Mul(e10i, bigInt[10]) + } + return e10 +}() + +// NewDec allocates and returns a new Dec set to the given int64 unscaled value +// and scale. +func NewDec(unscaled int64, scale Scale) *Dec { + return new(Dec).SetUnscaled(unscaled).SetScale(scale) +} + +// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled +// value and scale. +func NewDecBig(unscaled *big.Int, scale Scale) *Dec { + return new(Dec).SetUnscaledBig(unscaled).SetScale(scale) +} + +// Scale returns the scale of x. +func (x *Dec) Scale() Scale { + return x.scale +} + +// Unscaled returns the unscaled value of x for u and true for ok when the +// unscaled value can be represented as int64; otherwise it returns an undefined +// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid +// checking the validity of the value when the check is known to be redundant. +func (x *Dec) Unscaled() (u int64, ok bool) { + u = x.unscaled.Int64() + var i big.Int + ok = i.SetInt64(u).Cmp(&x.unscaled) == 0 + return +} + +// UnscaledBig returns the unscaled value of x as *big.Int. +func (x *Dec) UnscaledBig() *big.Int { + return &x.unscaled +} + +// SetScale sets the scale of z, with the unscaled value unchanged, and returns +// z. +// The mathematical value of the Dec changes as if it was multiplied by +// 10**(oldscale-scale). +func (z *Dec) SetScale(scale Scale) *Dec { + z.scale = scale + return z +} + +// SetUnscaled sets the unscaled value of z, with the scale unchanged, and +// returns z. +func (z *Dec) SetUnscaled(unscaled int64) *Dec { + z.unscaled.SetInt64(unscaled) + return z +} + +// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and +// returns z. +func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec { + z.unscaled.Set(unscaled) + return z +} + +// Set sets z to the value of x and returns z. +// It does nothing if z == x. +func (z *Dec) Set(x *Dec) *Dec { + if z != x { + z.SetUnscaledBig(x.UnscaledBig()) + z.SetScale(x.Scale()) + } + return z +} + +// Sign returns: +// +// -1 if x < 0 +// 0 if x == 0 +// +1 if x > 0 +// +func (x *Dec) Sign() int { + return x.UnscaledBig().Sign() +} + +// Neg sets z to -x and returns z. +func (z *Dec) Neg(x *Dec) *Dec { + z.SetScale(x.Scale()) + z.UnscaledBig().Neg(x.UnscaledBig()) + return z +} + +// Cmp compares x and y and returns: +// +// -1 if x < y +// 0 if x == y +// +1 if x > y +// +func (x *Dec) Cmp(y *Dec) int { + xx, yy := upscale(x, y) + return xx.UnscaledBig().Cmp(yy.UnscaledBig()) +} + +// Abs sets z to |x| (the absolute value of x) and returns z. +func (z *Dec) Abs(x *Dec) *Dec { + z.SetScale(x.Scale()) + z.UnscaledBig().Abs(x.UnscaledBig()) + return z +} + +// Add sets z to the sum x+y and returns z. +// The scale of z is the greater of the scales of x and y. +func (z *Dec) Add(x, y *Dec) *Dec { + xx, yy := upscale(x, y) + z.SetScale(xx.Scale()) + z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig()) + return z +} + +// Sub sets z to the difference x-y and returns z. +// The scale of z is the greater of the scales of x and y. 
+func (z *Dec) Sub(x, y *Dec) *Dec { + xx, yy := upscale(x, y) + z.SetScale(xx.Scale()) + z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig()) + return z +} + +// Mul sets z to the product x*y and returns z. +// The scale of z is the sum of the scales of x and y. +func (z *Dec) Mul(x, y *Dec) *Dec { + z.SetScale(x.Scale() + y.Scale()) + z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig()) + return z +} + +// Round sets z to the value of x rounded to Scale s using Rounder r, and +// returns z. +func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec { + return z.QuoRound(x, NewDec(1, 0), s, r) +} + +// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the +// specified scale. +// +// If the rounder is RoundExact but the result can not be expressed exactly at +// the specified scale, QuoRound returns nil, and the value of z is undefined. +// +// There is no corresponding Div method; the equivalent can be achieved through +// the choice of Rounder used. +// +func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec { + return z.quo(x, y, sclr{s}, r) +} + +func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec { + scl := s.Scale(x, y) + var zzz *Dec + if r.UseRemainder() { + zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int)) + zzz = r.Round(new(Dec), zz, rA, rB) + } else { + zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil) + zzz = r.Round(new(Dec), zz, nil, nil) + } + if zzz == nil { + return nil + } + return z.Set(zzz) +} + +// QuoExact sets z to the quotient x/y and returns z when x/y is a finite +// decimal. Otherwise it returns nil and the value of z is undefined. +// +// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is +// calculated so that the remainder will be zero whenever x/y is a finite +// decimal. +func (z *Dec) QuoExact(x, y *Dec) *Dec { + return z.quo(x, y, scaleQuoExact{}, RoundExact) +} + +// quoRem sets z to the quotient x/y with the scale s, and if useRem is true, +// it sets remNum and remDen to the numerator and denominator of the remainder. +// It returns z, remNum and remDen. +// +// The remainder is normalized to the range -1 < r < 1 to simplify rounding; +// that is, the results satisfy the following equation: +// +// x / y = z + (remNum/remDen) * 10**(-z.Scale()) +// +// See Rounder for more details about rounding. 
+// +func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool, + remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) { + // difference (required adjustment) compared to "canonical" result scale + shift := s - (x.Scale() - y.Scale()) + // pointers to adjusted unscaled dividend and divisor + var ix, iy *big.Int + switch { + case shift > 0: + // increased scale: decimal-shift dividend left + ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift)) + iy = y.UnscaledBig() + case shift < 0: + // decreased scale: decimal-shift divisor left + ix = x.UnscaledBig() + iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift)) + default: + ix = x.UnscaledBig() + iy = y.UnscaledBig() + } + // save a copy of iy in case it to be overwritten with the result + iy2 := iy + if iy == z.UnscaledBig() { + iy2 = new(big.Int).Set(iy) + } + // set scale + z.SetScale(s) + // set unscaled + if useRem { + // Int division + _, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int)) + // set remainder + remNum.Set(intr) + remDen.Set(iy2) + } else { + z.UnscaledBig().Quo(ix, iy) + } + return z, remNum, remDen +} + +type sclr struct{ s Scale } + +func (s sclr) Scale(x, y *Dec) Scale { + return s.s +} + +type scaleQuoExact struct{} + +func (sqe scaleQuoExact) Scale(x, y *Dec) Scale { + rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig()) + f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5]) + var f10 Scale + if f2 > f5 { + f10 = Scale(f2) + } else { + f10 = Scale(f5) + } + return x.Scale() - y.Scale() + f10 +} + +func factor(n *big.Int, p *big.Int) int { + // could be improved for large factors + d, f := n, 0 + for { + dd, dm := new(big.Int).DivMod(d, p, new(big.Int)) + if dm.Sign() == 0 { + f++ + d = dd + } else { + break + } + } + return f +} + +func factor2(n *big.Int) int { + // could be improved for large factors + f := 0 + for ; n.Bit(f) == 0; f++ { + } + return f +} + +func upscale(a, b *Dec) (*Dec, *Dec) { + if a.Scale() == b.Scale() { + return a, b + } + if a.Scale() > b.Scale() { + bb := b.rescale(a.Scale()) + return a, bb + } + aa := a.rescale(b.Scale()) + return aa, b +} + +func exp10(x Scale) *big.Int { + if int(x) < len(exp10cache) { + return &exp10cache[int(x)] + } + return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil) +} + +func (x *Dec) rescale(newScale Scale) *Dec { + shift := newScale - x.Scale() + switch { + case shift < 0: + e := exp10(-shift) + return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale) + case shift > 0: + e := exp10(shift) + return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale) + } + return x +} + +var zeros = []byte("00000000000000000000000000000000" + + "00000000000000000000000000000000") +var lzeros = Scale(len(zeros)) + +func appendZeros(s []byte, n Scale) []byte { + for i := Scale(0); i < n; i += lzeros { + if n > i+lzeros { + s = append(s, zeros...) + } else { + s = append(s, zeros[0:n-i]...) + } + } + return s +} + +func (x *Dec) String() string { + if x == nil { + return "" + } + scale := x.Scale() + s := []byte(x.UnscaledBig().String()) + if scale <= 0 { + if scale != 0 && x.unscaled.Sign() != 0 { + s = appendZeros(s, -scale) + } + return string(s) + } + negbit := Scale(-((x.Sign() - 1) / 2)) + // scale > 0 + lens := Scale(len(s)) + if lens-negbit <= scale { + ss := make([]byte, 0, scale+2) + if negbit == 1 { + ss = append(ss, '-') + } + ss = append(ss, '0', '.') + ss = appendZeros(ss, scale-lens+negbit) + ss = append(ss, s[negbit:]...) 
+ return string(ss) + } + // lens > scale + ss := make([]byte, 0, lens+1) + ss = append(ss, s[:lens-scale]...) + ss = append(ss, '.') + ss = append(ss, s[lens-scale:]...) + return string(ss) +} + +// Format is a support routine for fmt.Formatter. It accepts the decimal +// formats 'd' and 'f', and handles both equivalently. +// Width, precision, flags and bases 2, 8, 16 are not supported. +func (x *Dec) Format(s fmt.State, ch rune) { + if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' { + fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String()) + return + } + fmt.Fprintf(s, x.String()) +} + +func (z *Dec) scan(r io.RuneScanner) (*Dec, error) { + unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes + dp, dg := -1, -1 // indexes of decimal point, first digit +loop: + for { + ch, _, err := r.ReadRune() + if err == io.EOF { + break loop + } + if err != nil { + return nil, err + } + switch { + case ch == '+' || ch == '-': + if len(unscaled) > 0 || dp >= 0 { // must be first character + r.UnreadRune() + break loop + } + case ch == '.': + if dp >= 0 { + r.UnreadRune() + break loop + } + dp = len(unscaled) + continue // don't add to unscaled + case ch >= '0' && ch <= '9': + if dg == -1 { + dg = len(unscaled) + } + default: + r.UnreadRune() + break loop + } + unscaled = append(unscaled, byte(ch)) + } + if dg == -1 { + return nil, fmt.Errorf("no digits read") + } + if dp >= 0 { + z.SetScale(Scale(len(unscaled) - dp)) + } else { + z.SetScale(0) + } + _, ok := z.UnscaledBig().SetString(string(unscaled), 10) + if !ok { + return nil, fmt.Errorf("invalid decimal: %s", string(unscaled)) + } + return z, nil +} + +// SetString sets z to the value of s, interpreted as a decimal (base 10), +// and returns z and a boolean indicating success. The scale of z is the +// number of digits after the decimal point (including any trailing 0s), +// or 0 if there is no decimal point. If SetString fails, the value of z +// is undefined but the returned value is nil. +func (z *Dec) SetString(s string) (*Dec, bool) { + r := strings.NewReader(s) + _, err := z.scan(r) + if err != nil { + return nil, false + } + _, _, err = r.ReadRune() + if err != io.EOF { + return nil, false + } + // err == io.EOF => scan consumed all of s + return z, true +} + +// Scan is a support routine for fmt.Scanner; it sets z to the value of +// the scanned number. It accepts the decimal formats 'd' and 'f', and +// handles both equivalently. Bases 2, 8, 16 are not supported. +// The scale of z is the number of digits after the decimal point +// (including any trailing 0s), or 0 if there is no decimal point. +func (z *Dec) Scan(s fmt.ScanState, ch rune) error { + if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' { + return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch) + } + s.SkipSpace() + _, err := z.scan(s) + return err +} + +// Gob encoding version +const decGobVersion byte = 1 + +func scaleBytes(s Scale) []byte { + buf := make([]byte, scaleSize) + i := scaleSize + for j := 0; j < scaleSize; j++ { + i-- + buf[i] = byte(s) + s >>= 8 + } + return buf +} + +func scale(b []byte) (s Scale) { + for j := 0; j < scaleSize; j++ { + s <<= 8 + s |= Scale(b[j]) + } + return +} + +// GobEncode implements the gob.GobEncoder interface. +func (x *Dec) GobEncode() ([]byte, error) { + buf, err := x.UnscaledBig().GobEncode() + if err != nil { + return nil, err + } + buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion) + return buf, nil +} + +// GobDecode implements the gob.GobDecoder interface. 
+func (z *Dec) GobDecode(buf []byte) error { + if len(buf) == 0 { + return fmt.Errorf("Dec.GobDecode: no data") + } + b := buf[len(buf)-1] + if b != decGobVersion { + return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b) + } + l := len(buf) - scaleSize - 1 + err := z.UnscaledBig().GobDecode(buf[:l]) + if err != nil { + return err + } + z.SetScale(scale(buf[l : l+scaleSize])) + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (x *Dec) MarshalText() ([]byte, error) { + return []byte(x.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (z *Dec) UnmarshalText(data []byte) error { + _, ok := z.SetString(string(data)) + if !ok { + return fmt.Errorf("invalid inf.Dec") + } + return nil +} diff --git a/vendor/gopkg.in/inf.v0/rounder.go b/vendor/gopkg.in/inf.v0/rounder.go new file mode 100644 index 000000000..3a97ef529 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/rounder.go @@ -0,0 +1,145 @@ +package inf + +import ( + "math/big" +) + +// Rounder represents a method for rounding the (possibly infinite decimal) +// result of a division to a finite Dec. It is used by Dec.Round() and +// Dec.Quo(). +// +// See the Example for results of using each Rounder with some sample values. +// +type Rounder rounder + +// See http://speleotrove.com/decimal/damodel.html#refround for more detailed +// definitions of these rounding modes. +var ( + RoundDown Rounder // towards 0 + RoundUp Rounder // away from 0 + RoundFloor Rounder // towards -infinity + RoundCeil Rounder // towards +infinity + RoundHalfDown Rounder // to nearest; towards 0 if same distance + RoundHalfUp Rounder // to nearest; away from 0 if same distance + RoundHalfEven Rounder // to nearest; even last digit if same distance +) + +// RoundExact is to be used in the case when rounding is not necessary. +// When used with Quo or Round, it returns the result verbatim when it can be +// expressed exactly with the given precision, and it returns nil otherwise. +// QuoExact is a shorthand for using Quo with RoundExact. +var RoundExact Rounder + +type rounder interface { + + // When UseRemainder() returns true, the Round() method is passed the + // remainder of the division, expressed as the numerator and denominator of + // a rational. + UseRemainder() bool + + // Round sets the rounded value of a quotient to z, and returns z. + // quo is rounded down (truncated towards zero) to the scale obtained from + // the Scaler in Quo(). + // + // When the remainder is not used, remNum and remDen are nil. + // When used, the remainder is normalized between -1 and 1; that is: + // + // -|remDen| < remNum < |remDen| + // + // remDen has the same sign as y, and remNum is zero or has the same sign + // as x. 
+ Round(z, quo *Dec, remNum, remDen *big.Int) *Dec +} + +type rndr struct { + useRem bool + round func(z, quo *Dec, remNum, remDen *big.Int) *Dec +} + +func (r rndr) UseRemainder() bool { + return r.useRem +} + +func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec { + return r.round(z, quo, remNum, remDen) +} + +var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)} + +func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec { + return func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + brA, brB := rA.BitLen(), rB.BitLen() + if brA < brB-1 { + // brA < brB-1 => |rA| < |rB/2| + return z + } + roundUp := false + srA, srB := rA.Sign(), rB.Sign() + s := srA * srB + if brA == brB-1 { + rA2 := new(big.Int).Lsh(rA, 1) + if s < 0 { + rA2.Neg(rA2) + } + roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0)) + } else { + // brA > brB-1 => |rA| > |rB/2| + roundUp = true + } + if roundUp { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1]) + } + return z + } +} + +func init() { + RoundExact = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + if rA.Sign() != 0 { + return nil + } + return z.Set(q) + }} + RoundDown = rndr{false, + func(z, q *Dec, rA, rB *big.Int) *Dec { + return z.Set(q) + }} + RoundUp = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign() != 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1]) + } + return z + }} + RoundFloor = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign()*rB.Sign() < 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[0]) + } + return z + }} + RoundCeil = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign()*rB.Sign() > 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[2]) + } + return z + }} + RoundHalfDown = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c > 0 + })} + RoundHalfUp = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c >= 0 + })} + RoundHalfEven = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c > 0 || c == 0 && odd == 1 + })} +} diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 000000000..a68e67f01 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,188 @@ + +Copyright (c) 2011-2014 - Canonical Inc. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. 
+ + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. 
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 000000000..7b8bd8670 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. 
+ +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 000000000..95ec014e8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 000000000..085cddc44 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } + panic("unreachable") +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, 
zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? 
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) 
+ continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 000000000..2befd553e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an achor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 000000000..84f849955 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 000000000..0a7037ad1 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
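+//
+// As a rough sketch (assuming the usual token stream for a document like
+// `a: 1`): the dispatcher moves through STREAM_START, IMPLICIT_DOCUMENT_START,
+// BLOCK_NODE, BLOCK_MAPPING_FIRST_KEY, BLOCK_MAPPING_VALUE, BLOCK_MAPPING_KEY,
+// DOCUMENT_END and back to DOCUMENT_START, emitting STREAM-START,
+// DOCUMENT-START, MAPPING-START, SCALAR("a"), SCALAR("1"), MAPPING-END,
+// DOCUMENT-END and STREAM-END along the way.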
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
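+//
+// For example, a block mapping entry written as `foo:` with no value is
+// expected to reach this helper and come out as a zero-length plain SCALAR
+// event with implicit set, which the resolver later treats as null.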
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 000000000..d5fb09727 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,391 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
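+//
+// Illustration: a stream starting with the bytes 0xFF 0xFE is decoded as
+// UTF-16LE, 0xFE 0xFF as UTF-16BE, and 0xEF 0xBB 0xBF as UTF-8; when no BOM
+// is present at all, the reader below falls back to assuming UTF-8.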
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. 
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + } + buffer_len += width + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 000000000..93a863274 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,203 @@ +package yaml + +import ( + "encoding/base64" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. 
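+		// For instance, per the resolveMapList table above, plain scalars
+		// such as "yes", "true" and "on" resolve to !!bool, "", "~" and
+		// "null" to !!null, and ".inf"/".nan" to !!float, before any of the
+		// numeric parsing below is attempted.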
+ if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt(plain[3:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, -int(intv) + } else { + return yaml_INT_TAG, -intv + } + } + } + // XXX Handle timestamps here. + + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + if tag == yaml_BINARY_TAG { + return yaml_BINARY_TAG, in + } + if utf8.ValidString(in) { + return yaml_STR_TAG, in + } + return yaml_BINARY_TAG, encodeBase64(in) +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 000000000..d97d76fa5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2710 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). 
+// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. 
A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
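+//
+// The error is recorded on the parser as a context string with the mark where
+// the surrounding construct started, and a problem string with the mark where
+// scanning failed. For example, an unterminated directive is reported with
+// context "while scanning a directive" anchored at the '%' indicator and
+// problem "did not find expected comment or line break" at the current
+// position.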
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
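+		// (A key is "required" when it starts a block-context line at the
+		// current indentation level; see yaml_parser_save_simple_key. Removing
+		// such a key before its ':' has been seen is the error reported below.)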
+ if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. 
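+	// Unrolling the indentation to column -1 pops every open block level and
+	// emits one BLOCK-END token per level. For example, the document
+	//
+	//     a:
+	//       b:
+	//         c: d
+	//
+	// still has three block mappings open at the end of input, so three
+	// BLOCK-END tokens are produced here before STREAM-END.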
+ if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. 
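+//
+// For example, in the flow sequence [item 1, item 2] each ',' indicator
+// produces one FLOW-ENTRY token between the SCALAR tokens, matching the flow
+// collection examples in the overview comment above.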
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. 
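+		// The KEY token is inserted retroactively: for "a: 1" the scalar "a"
+		// has already been queued by the time ':' is seen, so the KEY token is
+		// spliced into the queue at the position recorded when the potential
+		// simple key was saved (simple_key.token_number).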
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
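+	// The token keeps the quoting style, e.g. 'it''s' becomes
+	// SCALAR("it's",single-quoted), while escapes in double-quoted scalars are
+	// already decoded by the time the token is built.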
+ var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
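+		// For the directive shown in the scope comment above,
+		//
+		//     %TAG !yaml! tag:yaml.org,2002:
+		//
+		// value holds the handle "!yaml!" and prefix holds "tag:yaml.org,2002:".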
+ *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found uknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
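+	// For the handle !yaml! the loop below collects "yaml"; the trailing '!'
+	// is copied by the check that follows it. If there is no trailing '!', the
+	// text is only acceptable outside a %TAG directive, where it is treated as
+	// the start of a tag suffix rather than a handle.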
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
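+		// An escaped octet is '%' followed by two hexadecimal digits. The
+		// sentinel value 1024 above is replaced by the UTF-8 width of the
+		// leading octet, so e.g. "%C3%A9" decodes to the two bytes 0xC3 0xA9,
+		// i.e. a single two-byte UTF-8 character.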
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
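+	// Only a comment or a line break may follow the block scalar header:
+	// headers such as "|", "|-", ">+2" or "| # comment" are accepted here,
+	// while "| text" is rejected with the error below.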
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
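+		// While *indent is still 0 (no explicit indentation indicator), all
+		// leading spaces are consumed and the widest column seen feeds the
+		// auto-detection at the end of this function. For example,
+		//
+		//     key: |
+		//         text
+		//
+		// detects an indentation of 4 for the scalar content.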
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
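+				// Single-character escapes are translated directly (e.g. \n
+				// becomes 0x0A and \_ becomes the UTF-8 bytes for #xA0), while
+				// \x, \u and \U only record how many hex digits follow; the
+				// numeric value is decoded and UTF-8 encoded further below
+				// (e.g. \u00E9 becomes the bytes 0xC3 0xA9).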
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. 
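+				// The first break is kept in leading_break and any further
+				// breaks in trailing_breaks; they are folded below, so inside a
+				// quoted scalar a single line break becomes a space while an
+				// empty line is preserved as a newline.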
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) 
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ: yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark: end_mark,
+		value: s,
+		style: yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 000000000..5958822f9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 000000000..190362f25 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. 
+ if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 000000000..d133edf9d --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,346 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. 
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Does not apply to zero valued structs.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int "a,omitempty"
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshal("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key string
+	Num int
+	OmitEmpty bool
+	Flow bool
+
+	// Inline holds the field index if the field is part of an inlined struct.
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 000000000..d60a6b6b0 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN // A KEY token.
+	yaml_VALUE_TOKEN // A VALUE token.
+
+	yaml_ALIAS_TOKEN // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_parser_set_input().
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible bool // Is a simple key possible?
+	required bool // Is a simple key required?
+	token_number int // The number of the token.
+	mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occured. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_file io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. 
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_file io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 000000000..8110ce3c3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. 
+ output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apimachinery/README.md b/vendor/k8s.io/apimachinery/README.md new file mode 100644 index 000000000..98899fb58 --- /dev/null +++ b/vendor/k8s.io/apimachinery/README.md @@ -0,0 +1,29 @@ +# apimachinery + +Scheme, typing, encoding, decoding, and conversion packages for Kubernetes and Kubernetes-like API objects. + + +## Purpose + +This library is a shared dependency for servers and clients to work with Kubernetes API infrastructure without direct +type dependencies. Its first consumers are `k8s.io/kubernetes`, `k8s.io/client-go`, and `k8s.io/apiserver`. + + +## Compatibility + +There are *NO compatibility guarantees* for this repository. It is in direct support of Kubernetes, so branches +will track Kubernetes and be compatible with that repo. As we more cleanly separate the layers, we will review the +compatibility guarantee. + + +## Where does it come from?
+ +`apimachinery` is synced from https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery. +Code changes are made in that location, merged into `k8s.io/kubernetes` and later synced here. + + +## Things you should *NOT* do + + 1. Add API types to this repo. This is for the machinery, not for the types. + 2. Directly modify any files under `pkg` in this repo. Those are driven from `k8s.io/kubernetes/staging/src/k8s.io/apimachinery`. + 3. Expect compatibility. This repo is in direct support of Kubernetes and the API isn't yet stable enough for API guarantees. \ No newline at end of file diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go new file mode 100644 index 000000000..3ebd8c719 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package equality + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// Semantic can do semantic deep equality checks for api objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. + return a.Cmp(b) == 0 + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go new file mode 100644 index 000000000..167baf680 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors provides detailed error types for api field validation.
+package errors // import "k8s.io/apimachinery/pkg/api/errors" diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go new file mode 100644 index 000000000..560c889b9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -0,0 +1,478 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// HTTP Status codes not in the golang http package. +const ( + StatusUnprocessableEntity = 422 + StatusTooManyRequests = 429 + // StatusServerTimeout is an indication that a transient server error has + // occurred and the client *should* retry, with an optional Retry-After + // header to specify the back off window. + StatusServerTimeout = 504 +) + +// StatusError is an error intended for consumption by a REST API server; it can also be +// reconstructed by clients from a REST response. Public to allow easy type switches. +type StatusError struct { + ErrStatus metav1.Status +} + +// APIStatus is exposed by errors that can be converted to an api.Status object +// for finer grained details. +type APIStatus interface { + Status() metav1.Status +} + +var _ error = &StatusError{} + +// Error implements the Error interface. +func (e *StatusError) Error() string { + return e.ErrStatus.Message +} + +// Status allows access to e's status without having to know the detailed workings +// of StatusError. +func (e *StatusError) Status() metav1.Status { + return e.ErrStatus +} + +// DebugError reports extended info about the error to debug output. +func (e *StatusError) DebugError() (string, []interface{}) { + if out, err := json.MarshalIndent(e.ErrStatus, "", " "); err == nil { + return "server response object: %s", []interface{}{string(out)} + } + return "server response object: %#v", []interface{}{e.ErrStatus} +} + +// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object. +type UnexpectedObjectError struct { + Object runtime.Object +} + +// Error returns an error message describing 'u'. +func (u *UnexpectedObjectError) Error() string { + return fmt.Sprintf("unexpected object: %v", u.Object) +} + +// FromObject generates a StatusError from a metav1.Status, if that is the type of obj; otherwise, +// returns an UnexpectedObjectError. +func FromObject(obj runtime.Object) error { + switch t := obj.(type) { + case *metav1.Status: + return &StatusError{*t} + } + return &UnexpectedObjectError{obj} +} + +// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found.
+func NewNotFound(qualifiedResource schema.GroupResource, name string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotFound, + Reason: metav1.StatusReasonNotFound, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name), + }} +} + +// NewAlreadyExists returns an error indicating the item requested exists by that identifier. +func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonAlreadyExists, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name), + }} +} + +// NewUnauthorized returns an error indicating the client is not authorized to perform the requested +// action. +func NewUnauthorized(reason string) *StatusError { + message := reason + if len(message) == 0 { + message = "not authorized" + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusUnauthorized, + Reason: metav1.StatusReasonUnauthorized, + Message: message, + }} +} + +// NewForbidden returns an error indicating the requested action was forbidden +func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusForbidden, + Reason: metav1.StatusReasonForbidden, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err), + }} +} + +// NewConflict returns an error indicating the item can't be updated as provided. +func NewConflict(qualifiedResource schema.GroupResource, name string, err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonConflict, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err), + }} +} + +// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. +func NewGone(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusGone, + Reason: metav1.StatusReasonGone, + Message: message, + }} +} + +// NewInvalid returns an error indicating the item is invalid and cannot be processed. 
+func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError { + causes := make([]metav1.StatusCause, 0, len(errs)) + for i := range errs { + err := errs[i] + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseType(err.Type), + Message: err.ErrorBody(), + Field: err.Field, + }) + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: StatusUnprocessableEntity, // RFC 4918: StatusUnprocessableEntity + Reason: metav1.StatusReasonInvalid, + Details: &metav1.StatusDetails{ + Group: qualifiedKind.Group, + Kind: qualifiedKind.Kind, + Name: name, + Causes: causes, + }, + Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()), + }} +} + +// NewBadRequest creates an error that indicates that the request is invalid and can not be processed. +func NewBadRequest(reason string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusBadRequest, + Reason: metav1.StatusReasonBadRequest, + Message: reason, + }} +} + +// NewServiceUnavailable creates an error that indicates that the requested service is unavailable. +func NewServiceUnavailable(reason string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusServiceUnavailable, + Reason: metav1.StatusReasonServiceUnavailable, + Message: reason, + }} +} + +// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind. +func NewMethodNotSupported(qualifiedResource schema.GroupResource, action string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusMethodNotAllowed, + Reason: metav1.StatusReasonMethodNotAllowed, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + }, + Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()), + }} +} + +// NewServerTimeout returns an error indicating the requested action could not be completed due to a +// transient error, and the client should try again. +func NewServerTimeout(qualifiedResource schema.GroupResource, operation string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonServerTimeout, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: operation, + RetryAfterSeconds: int32(retryAfterSeconds), + }, + Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()), + }} +} + +// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources, the Kind is just what we +// happened to be looking at when the request failed. This delegates to keep code sane, but we should work towards removing this. +func NewServerTimeoutForKind(qualifiedKind schema.GroupKind, operation string, retryAfterSeconds int) *StatusError { + return NewServerTimeout(schema.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds) +} + +// NewInternalError returns an error indicating the item is invalid and cannot be processed. 
+func NewInternalError(err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonInternalError, + Details: &metav1.StatusDetails{ + Causes: []metav1.StatusCause{{Message: err.Error()}}, + }, + Message: fmt.Sprintf("Internal error occurred: %v", err), + }} +} + +// NewTimeoutError returns an error indicating that a timeout occurred before the request +// could be completed. Clients may retry, but the operation may still complete. +func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: StatusServerTimeout, + Reason: metav1.StatusReasonTimeout, + Message: fmt.Sprintf("Timeout: %s", message), + Details: &metav1.StatusDetails{ + RetryAfterSeconds: int32(retryAfterSeconds), + }, + }} +} + +// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. +func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { + reason := metav1.StatusReasonUnknown + message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code) + switch code { + case http.StatusConflict: + if verb == "POST" { + reason = metav1.StatusReasonAlreadyExists + } else { + reason = metav1.StatusReasonConflict + } + message = "the server reported a conflict" + case http.StatusNotFound: + reason = metav1.StatusReasonNotFound + message = "the server could not find the requested resource" + case http.StatusBadRequest: + reason = metav1.StatusReasonBadRequest + message = "the server rejected our request for an unknown reason" + case http.StatusUnauthorized: + reason = metav1.StatusReasonUnauthorized + message = "the server has asked for the client to provide credentials" + case http.StatusForbidden: + reason = metav1.StatusReasonForbidden + // the server message has details about who is trying to perform what action. Keep its message. 
+ message = serverMessage + case http.StatusMethodNotAllowed: + reason = metav1.StatusReasonMethodNotAllowed + message = "the server does not allow this method on the requested resource" + case StatusUnprocessableEntity: + reason = metav1.StatusReasonInvalid + message = "the server rejected our request due to an error in our request" + case StatusServerTimeout: + reason = metav1.StatusReasonServerTimeout + message = "the server cannot complete the requested operation at this time, try again later" + case StatusTooManyRequests: + reason = metav1.StatusReasonTimeout + message = "the server has received too many requests and has asked us to try again later" + default: + if code >= 500 { + reason = metav1.StatusReasonInternalError + message = fmt.Sprintf("an error on the server (%q) has prevented the request from succeeding", serverMessage) + } + } + switch { + case !qualifiedResource.Empty() && len(name) > 0: + message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name) + case !qualifiedResource.Empty(): + message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String()) + } + var causes []metav1.StatusCause + if isUnexpectedResponse { + causes = []metav1.StatusCause{ + { + Type: metav1.CauseTypeUnexpectedServerResponse, + Message: serverMessage, + }, + } + } else { + causes = nil + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: int32(code), + Reason: reason, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + + Causes: causes, + RetryAfterSeconds: int32(retryAfterSeconds), + }, + Message: message, + }} +} + +// IsNotFound returns true if the specified error was created by NewNotFound. +func IsNotFound(err error) bool { + return reasonForError(err) == metav1.StatusReasonNotFound +} + +// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. +func IsAlreadyExists(err error) bool { + return reasonForError(err) == metav1.StatusReasonAlreadyExists +} + +// IsConflict determines if the err is an error which indicates the provided update conflicts. +func IsConflict(err error) bool { + return reasonForError(err) == metav1.StatusReasonConflict +} + +// IsInvalid determines if the err is an error which indicates the provided resource is not valid. +func IsInvalid(err error) bool { + return reasonForError(err) == metav1.StatusReasonInvalid +} + +// IsMethodNotSupported determines if the err is an error which indicates the provided action could not +// be performed because it is not supported by the server. +func IsMethodNotSupported(err error) bool { + return reasonForError(err) == metav1.StatusReasonMethodNotAllowed +} + +// IsBadRequest determines if err is an error which indicates that the request is invalid. +func IsBadRequest(err error) bool { + return reasonForError(err) == metav1.StatusReasonBadRequest +} + +// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and +// requires authentication by the user. +func IsUnauthorized(err error) bool { + return reasonForError(err) == metav1.StatusReasonUnauthorized +} + +// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot +// be completed as requested. 
+func IsForbidden(err error) bool { + return reasonForError(err) == metav1.StatusReasonForbidden +} + +// IsTimeout determines if err is an error which indicates that request times out due to long +// processing. +func IsTimeout(err error) bool { + return reasonForError(err) == metav1.StatusReasonTimeout +} + +// IsServerTimeout determines if err is an error which indicates that the request needs to be retried +// by the client. +func IsServerTimeout(err error) bool { + return reasonForError(err) == metav1.StatusReasonServerTimeout +} + +// IsInternalError determines if err is an error which indicates an internal server error. +func IsInternalError(err error) bool { + return reasonForError(err) == metav1.StatusReasonInternalError +} + +// IsTooManyRequests determines if err is an error which indicates that there are too many requests +// that the server cannot handle. +// TODO: update IsTooManyRequests() when the TooManyRequests(429) error returned from the API server has a non-empty Reason field +func IsTooManyRequests(err error) bool { + switch t := err.(type) { + case APIStatus: + return t.Status().Code == StatusTooManyRequests + } + return false +} + +// IsUnexpectedServerError returns true if the server response was not in the expected API format, +// and may be the result of another HTTP actor. +func IsUnexpectedServerError(err error) bool { + switch t := err.(type) { + case APIStatus: + if d := t.Status().Details; d != nil { + for _, cause := range d.Causes { + if cause.Type == metav1.CauseTypeUnexpectedServerResponse { + return true + } + } + } + } + return false +} + +// IsUnexpectedObjectError determines if err is due to an unexpected object from the master. +func IsUnexpectedObjectError(err error) bool { + _, ok := err.(*UnexpectedObjectError) + return err != nil && ok +} + +// SuggestsClientDelay returns true if this error suggests a client delay as well as the +// suggested seconds to wait, or false if the error does not imply a wait. +func SuggestsClientDelay(err error) (int, bool) { + switch t := err.(type) { + case APIStatus: + if t.Status().Details != nil { + switch t.Status().Reason { + case metav1.StatusReasonServerTimeout, metav1.StatusReasonTimeout: + return int(t.Status().Details.RetryAfterSeconds), true + } + } + } + return 0, false +} + +func reasonForError(err error) metav1.StatusReason { + switch t := err.(type) { + case APIStatus: + return t.Status().Reason + } + return metav1.StatusReasonUnknown +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/default.go b/vendor/k8s.io/apimachinery/pkg/api/meta/default.go new file mode 100644 index 000000000..5ea906a2a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/default.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package meta + +import ( + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// NewDefaultRESTMapperFromScheme instantiates a DefaultRESTMapper based on types registered in the given scheme. +func NewDefaultRESTMapperFromScheme(defaultGroupVersions []schema.GroupVersion, interfacesFunc VersionInterfacesFunc, + importPathPrefix string, ignoredKinds, rootScoped sets.String, scheme *runtime.Scheme) *DefaultRESTMapper { + + mapper := NewDefaultRESTMapper(defaultGroupVersions, interfacesFunc) + // enumerate all supported versions, get the kinds, and register with the mapper how to address + // our resources. + for _, gv := range defaultGroupVersions { + for kind, oType := range scheme.KnownTypes(gv) { + gvk := gv.WithKind(kind) + // TODO: Remove import path check. + // We check the import path because we currently stuff both "api" and "extensions" objects + // into the same group within Scheme since Scheme has no notion of groups yet. + if !strings.Contains(oType.PkgPath(), importPathPrefix) || ignoredKinds.Has(kind) { + continue + } + scope := RESTScopeNamespace + if rootScoped.Has(kind) { + scope = RESTScopeRoot + } + mapper.Add(gvk, scope) + } + } + return mapper +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go new file mode 100644 index 000000000..b6d42acf8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package meta provides functions for retrieving API metadata from objects +// belonging to the Kubernetes API +package meta // import "k8s.io/apimachinery/pkg/api/meta" diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go new file mode 100644 index 000000000..1503bd6d8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go @@ -0,0 +1,105 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package meta + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource +type AmbiguousResourceError struct { + PartialResource schema.GroupVersionResource + + MatchingResources []schema.GroupVersionResource + MatchingKinds []schema.GroupVersionKind +} + +func (e *AmbiguousResourceError) Error() string { + switch { + case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds) + case len(e.MatchingKinds) > 0: + return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds) + case len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources) + } + return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource) +} + +// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind +type AmbiguousKindError struct { + PartialKind schema.GroupVersionKind + + MatchingResources []schema.GroupVersionResource + MatchingKinds []schema.GroupVersionKind +} + +func (e *AmbiguousKindError) Error() string { + switch { + case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds) + case len(e.MatchingKinds) > 0: + return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds) + case len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources) + } + return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind) +} + +func IsAmbiguousError(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case *AmbiguousResourceError, *AmbiguousKindError: + return true + default: + return false + } +} + +// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource +type NoResourceMatchError struct { + PartialResource schema.GroupVersionResource +} + +func (e *NoResourceMatchError) Error() string { + return fmt.Sprintf("no matches for %v", e.PartialResource) +} + +// NoKindMatchError is returned if the RESTMapper can't find any match for a kind +type NoKindMatchError struct { + PartialKind schema.GroupVersionKind +} + +func (e *NoKindMatchError) Error() string { + return fmt.Sprintf("no matches for %v", e.PartialKind) +} + +func IsNoMatchError(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case *NoResourceMatchError, *NoKindMatchError: + return true + default: + return false + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go new file mode 100644 index 000000000..fd2210022 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go @@ -0,0 +1,97 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +// FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the +// first successful result for the singular requests +type FirstHitRESTMapper struct { + MultiRESTMapper +} + +func (m FirstHitRESTMapper) String() string { + return fmt.Sprintf("FirstHitRESTMapper{\n\t%v\n}", m.MultiRESTMapper) +} + +func (m FirstHitRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + errors := []error{} + for _, t := range m.MultiRESTMapper { + ret, err := t.ResourceFor(resource) + if err == nil { + return ret, nil + } + errors = append(errors, err) + } + + return schema.GroupVersionResource{}, collapseAggregateErrors(errors) +} + +func (m FirstHitRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + errors := []error{} + for _, t := range m.MultiRESTMapper { + ret, err := t.KindFor(resource) + if err == nil { + return ret, nil + } + errors = append(errors, err) + } + + return schema.GroupVersionKind{}, collapseAggregateErrors(errors) +} + +// RESTMapping provides the REST mapping for the resource based on the +// kind and version. This implementation supports multiple REST schemas and +// return the first match. +func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + errors := []error{} + for _, t := range m.MultiRESTMapper { + ret, err := t.RESTMapping(gk, versions...) + if err == nil { + return ret, nil + } + errors = append(errors, err) + } + + return nil, collapseAggregateErrors(errors) +} + +// collapseAggregateErrors returns the minimal errors. it handles empty as nil, handles one item in a list +// by returning the item, and collapses all NoMatchErrors to a single one (since they should all be the same) +func collapseAggregateErrors(errors []error) error { + if len(errors) == 0 { + return nil + } + if len(errors) == 1 { + return errors[0] + } + + allNoMatchErrors := true + for _, err := range errors { + allNoMatchErrors = allNoMatchErrors && IsNoMatchError(err) + } + if allNoMatchErrors { + return errors[0] + } + + return utilerrors.NewAggregate(errors) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go new file mode 100644 index 000000000..9e0fb152a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -0,0 +1,202 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" +) + +// IsListType returns true if the provided Object has a slice called Items +func IsListType(obj runtime.Object) bool { + // if we're a runtime.Unstructured, check whether this is a list. 
+ // TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object + if unstructured, ok := obj.(runtime.Unstructured); ok { + return unstructured.IsList() + } + + _, err := GetItemsPtr(obj) + return err == nil +} + +// GetItemsPtr returns a pointer to the list object's Items member. +// If 'list' doesn't have an Items member, it's not really a list type +// and an error will be returned. +// This function will either return a pointer to a slice, or an error, but not both. +func GetItemsPtr(list runtime.Object) (interface{}, error) { + v, err := conversion.EnforcePtr(list) + if err != nil { + return nil, err + } + + items := v.FieldByName("Items") + if !items.IsValid() { + return nil, fmt.Errorf("no Items field in %#v", list) + } + switch items.Kind() { + case reflect.Interface, reflect.Ptr: + target := reflect.TypeOf(items.Interface()).Elem() + if target.Kind() != reflect.Slice { + return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind()) + } + return items.Interface(), nil + case reflect.Slice: + return items.Addr().Interface(), nil + default: + return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind()) + } +} + +// EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates +// the loop. +func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { + // TODO: Change to an interface call? + itemsPtr, err := GetItemsPtr(obj) + if err != nil { + return err + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return err + } + len := items.Len() + if len == 0 { + return nil + } + takeAddr := false + if elemType := items.Type().Elem(); elemType.Kind() != reflect.Ptr && elemType.Kind() != reflect.Interface { + if !items.Index(0).CanAddr() { + return fmt.Errorf("unable to take address of items in %T for EachListItem", obj) + } + takeAddr = true + } + + for i := 0; i < len; i++ { + raw := items.Index(i) + if takeAddr { + raw = raw.Addr() + } + switch item := raw.Interface().(type) { + case *runtime.RawExtension: + if err := fn(item.Object); err != nil { + return err + } + case runtime.Object: + if err := fn(item); err != nil { + return err + } + default: + obj, ok := item.(runtime.Object) + if !ok { + return fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) + } + if err := fn(obj); err != nil { + return err + } + } + } + return nil +} + +// ExtractList returns obj's Items element as an array of runtime.Objects. +// Returns an error if obj is not a List type (does not have an Items member). +func ExtractList(obj runtime.Object) ([]runtime.Object, error) { + itemsPtr, err := GetItemsPtr(obj) + if err != nil { + return nil, err + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return nil, err + } + list := make([]runtime.Object, items.Len()) + for i := range list { + raw := items.Index(i) + switch item := raw.Interface().(type) { + case runtime.RawExtension: + switch { + case item.Object != nil: + list[i] = item.Object + case item.Raw != nil: + // TODO: Set ContentEncoding and ContentType correctly. 
+ list[i] = &runtime.Unknown{Raw: item.Raw} + default: + list[i] = nil + } + case runtime.Object: + list[i] = item + default: + var found bool + if list[i], found = raw.Addr().Interface().(runtime.Object); !found { + return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) + } + } + } + return list, nil +} + +// objectSliceType is the type of a slice of Objects +var objectSliceType = reflect.TypeOf([]runtime.Object{}) + +// SetList sets the given list object's Items member have the elements given in +// objects. +// Returns an error if list is not a List type (does not have an Items member), +// or if any of the objects are not of the right type. +func SetList(list runtime.Object, objects []runtime.Object) error { + itemsPtr, err := GetItemsPtr(list) + if err != nil { + return err + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return err + } + if items.Type() == objectSliceType { + items.Set(reflect.ValueOf(objects)) + return nil + } + slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) + for i := range objects { + dest := slice.Index(i) + if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) { + dest = dest.FieldByName("Object") + } + + // check to see if you're directly assignable + if reflect.TypeOf(objects[i]).AssignableTo(dest.Type()) { + dest.Set(reflect.ValueOf(objects[i])) + continue + } + + src, err := conversion.EnforcePtr(objects[i]) + if err != nil { + return err + } + if src.Type().AssignableTo(dest.Type()) { + dest.Set(src) + } else if src.Type().ConvertibleTo(dest.Type()) { + dest.Set(src.Convert(dest.Type())) + } else { + return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type()) + } + } + items.Set(slice) + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go new file mode 100644 index 000000000..b2c8c97b1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -0,0 +1,146 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// VersionInterfaces contains the interfaces one should use for dealing with types of a particular version. +type VersionInterfaces struct { + runtime.ObjectConvertor + MetadataAccessor +} + +type ListMetaAccessor interface { + GetListMeta() List +} + +// List lets you work with list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +type List metav1.List + +// Type exposes the type and APIVersion of versioned or internal API objects. 
+type Type metav1.Type + +// MetadataAccessor lets you work with object and list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field (Name, UID, Namespace on lists) will be a no-op and return +// a default value. +// +// MetadataAccessor exposes Interface in a way that can be used with multiple objects. +type MetadataAccessor interface { + APIVersion(obj runtime.Object) (string, error) + SetAPIVersion(obj runtime.Object, version string) error + + Kind(obj runtime.Object) (string, error) + SetKind(obj runtime.Object, kind string) error + + Namespace(obj runtime.Object) (string, error) + SetNamespace(obj runtime.Object, namespace string) error + + Name(obj runtime.Object) (string, error) + SetName(obj runtime.Object, name string) error + + GenerateName(obj runtime.Object) (string, error) + SetGenerateName(obj runtime.Object, name string) error + + UID(obj runtime.Object) (types.UID, error) + SetUID(obj runtime.Object, uid types.UID) error + + SelfLink(obj runtime.Object) (string, error) + SetSelfLink(obj runtime.Object, selfLink string) error + + Labels(obj runtime.Object) (map[string]string, error) + SetLabels(obj runtime.Object, labels map[string]string) error + + Annotations(obj runtime.Object) (map[string]string, error) + SetAnnotations(obj runtime.Object, annotations map[string]string) error + + runtime.ResourceVersioner +} + +type RESTScopeName string + +const ( + RESTScopeNameNamespace RESTScopeName = "namespace" + RESTScopeNameRoot RESTScopeName = "root" +) + +// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy +type RESTScope interface { + // Name of the scope + Name() RESTScopeName + // ParamName is the optional name of the parameter that should be inserted in the resource url + // If empty, no param will be inserted + ParamName() string + // ArgumentName is the optional name that should be used for the variable holding the value. + ArgumentName() string + // ParamDescription is the optional description to use to document the parameter in api documentation + ParamDescription() string +} + +// RESTMapping contains the information needed to deal with objects of a specific +// resource and kind in a RESTful manner. +type RESTMapping struct { + // Resource is a string representing the name of this resource as a REST client would see it + Resource string + + GroupVersionKind schema.GroupVersionKind + + // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy + Scope RESTScope + + runtime.ObjectConvertor + MetadataAccessor +} + +// RESTMapper allows clients to map resources to kind, and map kind and version +// to interfaces for manipulating those objects. It is primarily intended for +// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md. +// +// The Kubernetes API provides versioned resources and object kinds which are scoped +// to API groups. In other words, kinds and resources should not be assumed to be +// unique across groups. +// +// TODO: split into sub-interfaces +type RESTMapper interface { + // KindFor takes a partial resource and returns the single match. 
Returns an error if there are multiple matches + KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) + + // KindsFor takes a partial resource and returns the list of potential kinds in priority order + KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) + + // ResourceFor takes a partial resource and returns the single match. Returns an error if there are multiple matches + ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) + + // ResourcesFor takes a partial resource and returns the list of potential resource in priority order + ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) + + // RESTMapping identifies a preferred resource mapping for the provided group kind. + RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) + // RESTMappings returns all resource mappings for the provided group kind if no + // version search is provided. Otherwise identifies a preferred resource mapping for + // the provided version(s). + RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) + + ResourceSingularizer(resource string) (singular string, err error) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go new file mode 100644 index 000000000..45d850ea8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -0,0 +1,610 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "reflect" + + "github.com/golang/glog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1alpha1 "k8s.io/apimachinery/pkg/apis/meta/v1alpha1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// errNotList is returned when an object implements the Object style interfaces but not the List style +// interfaces. +var errNotList = fmt.Errorf("object does not implement the List interfaces") + +// ListAccessor returns a List interface for the provided object or an error if the object does +// not provide List. +// IMPORTANT: Objects are a superset of lists, so all Objects return List metadata. Do not use this +// check to determine whether an object *is* a List. 
+// TODO: return bool instead of error +func ListAccessor(obj interface{}) (List, error) { + switch t := obj.(type) { + case List: + return t, nil + case metav1.List: + return t, nil + case ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotList + case metav1.ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotList + case metav1.Object: + return t, nil + case metav1.ObjectMetaAccessor: + if m := t.GetObjectMeta(); m != nil { + return m, nil + } + return nil, errNotList + default: + return nil, errNotList + } +} + +// errNotObject is returned when an object implements the List style interfaces but not the Object style +// interfaces. +var errNotObject = fmt.Errorf("object does not implement the Object interfaces") + +// Accessor takes an arbitrary object pointer and returns meta.Interface. +// obj must be a pointer to an API type. An error is returned if the minimum +// required fields are missing. Fields that are not required return the default +// value and are a no-op if set. +// TODO: return bool instead of error +func Accessor(obj interface{}) (metav1.Object, error) { + switch t := obj.(type) { + case metav1.Object: + return t, nil + case metav1.ObjectMetaAccessor: + if m := t.GetObjectMeta(); m != nil { + return m, nil + } + return nil, errNotObject + default: + return nil, errNotObject + } +} + +// AsPartialObjectMetadata takes the metav1 interface and returns a partial object. +// TODO: consider making this solely a conversion action. +func AsPartialObjectMetadata(m metav1.Object) *metav1alpha1.PartialObjectMetadata { + switch t := m.(type) { + case *metav1.ObjectMeta: + return &metav1alpha1.PartialObjectMetadata{ObjectMeta: *t} + default: + return &metav1alpha1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Name: m.GetName(), + GenerateName: m.GetGenerateName(), + Namespace: m.GetNamespace(), + SelfLink: m.GetSelfLink(), + UID: m.GetUID(), + ResourceVersion: m.GetResourceVersion(), + Generation: m.GetGeneration(), + CreationTimestamp: m.GetCreationTimestamp(), + DeletionTimestamp: m.GetDeletionTimestamp(), + DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(), + Labels: m.GetLabels(), + Annotations: m.GetAnnotations(), + OwnerReferences: m.GetOwnerReferences(), + Finalizers: m.GetFinalizers(), + ClusterName: m.GetClusterName(), + Initializers: m.GetInitializers(), + }, + } + } +} + +// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion +// and Kind of an in-memory internal object. +// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta +// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube +// api conventions). 
+func TypeAccessor(obj interface{}) (Type, error) { + if typed, ok := obj.(runtime.Object); ok { + return objectAccessor{typed}, nil + } + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, err + } + t := v.Type() + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) + } + + typeMeta := v.FieldByName("TypeMeta") + if !typeMeta.IsValid() { + return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t) + } + a := &genericAccessor{} + if err := extractFromTypeMeta(typeMeta, a); err != nil { + return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err) + } + return a, nil +} + +type objectAccessor struct { + runtime.Object +} + +func (obj objectAccessor) GetKind() string { + return obj.GetObjectKind().GroupVersionKind().Kind +} + +func (obj objectAccessor) SetKind(kind string) { + gvk := obj.GetObjectKind().GroupVersionKind() + gvk.Kind = kind + obj.GetObjectKind().SetGroupVersionKind(gvk) +} + +func (obj objectAccessor) GetAPIVersion() string { + return obj.GetObjectKind().GroupVersionKind().GroupVersion().String() +} + +func (obj objectAccessor) SetAPIVersion(version string) { + gvk := obj.GetObjectKind().GroupVersionKind() + gv, err := schema.ParseGroupVersion(version) + if err != nil { + gv = schema.GroupVersion{Version: version} + } + gvk.Group, gvk.Version = gv.Group, gv.Version + obj.GetObjectKind().SetGroupVersionKind(gvk) +} + +// NewAccessor returns a MetadataAccessor that can retrieve +// or manipulate resource version on objects derived from core API +// metadata concepts. +func NewAccessor() MetadataAccessor { + return resourceAccessor{} +} + +// resourceAccessor implements ResourceVersioner and SelfLinker. +type resourceAccessor struct{} + +func (resourceAccessor) Kind(obj runtime.Object) (string, error) { + return objectAccessor{obj}.GetKind(), nil +} + +func (resourceAccessor) SetKind(obj runtime.Object, kind string) error { + objectAccessor{obj}.SetKind(kind) + return nil +} + +func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) { + return objectAccessor{obj}.GetAPIVersion(), nil +} + +func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error { + objectAccessor{obj}.SetAPIVersion(version) + return nil +} + +func (resourceAccessor) Namespace(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetNamespace(), nil +} + +func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetNamespace(namespace) + return nil +} + +func (resourceAccessor) Name(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetName(), nil +} + +func (resourceAccessor) SetName(obj runtime.Object, name string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetName(name) + return nil +} + +func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetGenerateName(), nil +} + +func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetGenerateName(name) + return nil +} + +func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) { + accessor, err 
:= Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetUID(), nil +} + +func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetUID(uid) + return nil +} + +func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetSelfLink(), nil +} + +func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetSelfLink(selfLink) + return nil +} + +func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) { + accessor, err := Accessor(obj) + if err != nil { + return nil, err + } + return accessor.GetLabels(), nil +} + +func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetLabels(labels) + return nil +} + +func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) { + accessor, err := Accessor(obj) + if err != nil { + return nil, err + } + return accessor.GetAnnotations(), nil +} + +func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetAnnotations(annotations) + return nil +} + +func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetResourceVersion(), nil +} + +func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetResourceVersion(version) + return nil +} + +// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. +func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { + if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { + return err + } + if err := runtime.Field(v, "Kind", &o.Kind); err != nil { + return err + } + if err := runtime.Field(v, "Name", &o.Name); err != nil { + return err + } + if err := runtime.Field(v, "UID", &o.UID); err != nil { + return err + } + var controllerPtr *bool + if err := runtime.Field(v, "Controller", &controllerPtr); err != nil { + return err + } + if controllerPtr != nil { + controller := *controllerPtr + o.Controller = &controller + } + var blockOwnerDeletionPtr *bool + if err := runtime.Field(v, "BlockOwnerDeletion", &blockOwnerDeletionPtr); err != nil { + return err + } + if blockOwnerDeletionPtr != nil { + block := *blockOwnerDeletionPtr + o.BlockOwnerDeletion = &block + } + return nil +} + +// setOwnerReference sets v to o. v is the OwnerReferences field of an object. 
+func setOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { + if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil { + return err + } + if err := runtime.SetField(o.Kind, v, "Kind"); err != nil { + return err + } + if err := runtime.SetField(o.Name, v, "Name"); err != nil { + return err + } + if err := runtime.SetField(o.UID, v, "UID"); err != nil { + return err + } + if o.Controller != nil { + controller := *(o.Controller) + if err := runtime.SetField(&controller, v, "Controller"); err != nil { + return err + } + } + if o.BlockOwnerDeletion != nil { + block := *(o.BlockOwnerDeletion) + if err := runtime.SetField(&block, v, "BlockOwnerDeletion"); err != nil { + return err + } + } + return nil +} + +// genericAccessor contains pointers to strings that can modify an arbitrary +// struct and implements the Accessor interface. +type genericAccessor struct { + namespace *string + name *string + generateName *string + uid *types.UID + apiVersion *string + kind *string + resourceVersion *string + selfLink *string + creationTimestamp *metav1.Time + deletionTimestamp **metav1.Time + labels *map[string]string + annotations *map[string]string + ownerReferences reflect.Value + finalizers *[]string +} + +func (a genericAccessor) GetNamespace() string { + if a.namespace == nil { + return "" + } + return *a.namespace +} + +func (a genericAccessor) SetNamespace(namespace string) { + if a.namespace == nil { + return + } + *a.namespace = namespace +} + +func (a genericAccessor) GetName() string { + if a.name == nil { + return "" + } + return *a.name +} + +func (a genericAccessor) SetName(name string) { + if a.name == nil { + return + } + *a.name = name +} + +func (a genericAccessor) GetGenerateName() string { + if a.generateName == nil { + return "" + } + return *a.generateName +} + +func (a genericAccessor) SetGenerateName(generateName string) { + if a.generateName == nil { + return + } + *a.generateName = generateName +} + +func (a genericAccessor) GetUID() types.UID { + if a.uid == nil { + return "" + } + return *a.uid +} + +func (a genericAccessor) SetUID(uid types.UID) { + if a.uid == nil { + return + } + *a.uid = uid +} + +func (a genericAccessor) GetAPIVersion() string { + return *a.apiVersion +} + +func (a genericAccessor) SetAPIVersion(version string) { + *a.apiVersion = version +} + +func (a genericAccessor) GetKind() string { + return *a.kind +} + +func (a genericAccessor) SetKind(kind string) { + *a.kind = kind +} + +func (a genericAccessor) GetResourceVersion() string { + return *a.resourceVersion +} + +func (a genericAccessor) SetResourceVersion(version string) { + *a.resourceVersion = version +} + +func (a genericAccessor) GetSelfLink() string { + return *a.selfLink +} + +func (a genericAccessor) SetSelfLink(selfLink string) { + *a.selfLink = selfLink +} + +func (a genericAccessor) GetCreationTimestamp() metav1.Time { + return *a.creationTimestamp +} + +func (a genericAccessor) SetCreationTimestamp(timestamp metav1.Time) { + *a.creationTimestamp = timestamp +} + +func (a genericAccessor) GetDeletionTimestamp() *metav1.Time { + return *a.deletionTimestamp +} + +func (a genericAccessor) SetDeletionTimestamp(timestamp *metav1.Time) { + *a.deletionTimestamp = timestamp +} + +func (a genericAccessor) GetLabels() map[string]string { + if a.labels == nil { + return nil + } + return *a.labels +} + +func (a genericAccessor) SetLabels(labels map[string]string) { + *a.labels = labels +} + +func (a genericAccessor) GetAnnotations() map[string]string { + if a.annotations == 
nil { + return nil + } + return *a.annotations +} + +func (a genericAccessor) SetAnnotations(annotations map[string]string) { + if a.annotations == nil { + emptyAnnotations := make(map[string]string) + a.annotations = &emptyAnnotations + } + *a.annotations = annotations +} + +func (a genericAccessor) GetFinalizers() []string { + if a.finalizers == nil { + return nil + } + return *a.finalizers +} + +func (a genericAccessor) SetFinalizers(finalizers []string) { + *a.finalizers = finalizers +} + +func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { + var ret []metav1.OwnerReference + s := a.ownerReferences + if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + glog.Errorf("expect %v to be a pointer to slice", s) + return ret + } + s = s.Elem() + // Set the capacity to one element greater to avoid copy if the caller later append an element. + ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) + for i := 0; i < s.Len(); i++ { + if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { + glog.Errorf("extractFromOwnerReference failed: %v", err) + return ret + } + } + return ret +} + +func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { + s := a.ownerReferences + if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + glog.Errorf("expect %v to be a pointer to slice", s) + } + s = s.Elem() + newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) + for i := 0; i < len(references); i++ { + if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { + glog.Errorf("setOwnerReference failed: %v", err) + return + } + } + s.Set(newReferences) +} + +// extractFromTypeMeta extracts pointers to version and kind fields from an object +func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error { + if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil { + return err + } + if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go new file mode 100644 index 000000000..679098fe5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go @@ -0,0 +1,210 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +// MultiRESTMapper is a wrapper for multiple RESTMappers. 
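An illustrative aside, not part of the vendored file: MultiRESTMapper, declared just below, asks each delegate in turn, skips "no match" errors, and de-duplicates the answers. A rough sketch of wiring one up by hand; the group/version/kind names are invented, and DefaultRESTMapper and InterfacesForUnstructured come from later files in this patch.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    coreGV := schema.GroupVersion{Version: "v1"}
    appsGV := schema.GroupVersion{Group: "apps", Version: "v1beta1"}

    core := meta.NewDefaultRESTMapper([]schema.GroupVersion{coreGV}, meta.InterfacesForUnstructured)
    core.Add(coreGV.WithKind("Pod"), meta.RESTScopeNamespace)

    apps := meta.NewDefaultRESTMapper([]schema.GroupVersion{appsGV}, meta.InterfacesForUnstructured)
    apps.Add(appsGV.WithKind("Deployment"), meta.RESTScopeNamespace)

    mapper := meta.MultiRESTMapper{core, apps}

    // The first delegate reports "no match" and is skipped; the second resolves it.
    gvk, err := mapper.KindFor(schema.GroupVersionResource{Resource: "deployments"})
    fmt.Println(gvk, err) // apps/v1beta1, Kind=Deployment <nil>
}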
+type MultiRESTMapper []RESTMapper + +func (m MultiRESTMapper) String() string { + nested := []string{} + for _, t := range m { + currString := fmt.Sprintf("%v", t) + splitStrings := strings.Split(currString, "\n") + nested = append(nested, strings.Join(splitStrings, "\n\t")) + } + + return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t")) +} + +// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod) +// This implementation supports multiple REST schemas and return the first match. +func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + for _, t := range m { + singular, err = t.ResourceSingularizer(resource) + if err == nil { + return + } + } + return +} + +func (m MultiRESTMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + allGVRs := []schema.GroupVersionResource{} + for _, t := range m { + gvrs, err := t.ResourcesFor(resource) + // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + return nil, err + } + + // walk the existing values to de-dup + for _, curr := range gvrs { + found := false + for _, existing := range allGVRs { + if curr == existing { + found = true + break + } + } + + if !found { + allGVRs = append(allGVRs, curr) + } + } + } + + if len(allGVRs) == 0 { + return nil, &NoResourceMatchError{PartialResource: resource} + } + + return allGVRs, nil +} + +func (m MultiRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { + allGVKs := []schema.GroupVersionKind{} + for _, t := range m { + gvks, err := t.KindsFor(resource) + // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + return nil, err + } + + // walk the existing values to de-dup + for _, curr := range gvks { + found := false + for _, existing := range allGVKs { + if curr == existing { + found = true + break + } + } + + if !found { + allGVKs = append(allGVKs, curr) + } + } + } + + if len(allGVKs) == 0 { + return nil, &NoResourceMatchError{PartialResource: resource} + } + + return allGVKs, nil +} + +func (m MultiRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + resources, err := m.ResourcesFor(resource) + if err != nil { + return schema.GroupVersionResource{}, err + } + if len(resources) == 1 { + return resources[0], nil + } + + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} +} + +func (m MultiRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + kinds, err := m.KindsFor(resource) + if err != nil { + return schema.GroupVersionKind{}, err + } + if len(kinds) == 1 { + return kinds[0], nil + } + + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} +} + +// RESTMapping provides the REST mapping for the resource based on the +// kind and version. This implementation supports multiple REST schemas and +// return the first match. +func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + allMappings := []*RESTMapping{} + errors := []error{} + + for _, t := range m { + currMapping, err := t.RESTMapping(gk, versions...) 
+ // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + errors = append(errors, err) + continue + } + + allMappings = append(allMappings, currMapping) + } + + // if we got exactly one mapping, then use it even if other requested failed + if len(allMappings) == 1 { + return allMappings[0], nil + } + if len(allMappings) > 1 { + var kinds []schema.GroupVersionKind + for _, m := range allMappings { + kinds = append(kinds, m.GroupVersionKind) + } + return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} + } + if len(errors) > 0 { + return nil, utilerrors.NewAggregate(errors) + } + return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} +} + +// RESTMappings returns all possible RESTMappings for the provided group kind, or an error +// if the type is not recognized. +func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + var allMappings []*RESTMapping + var errors []error + + for _, t := range m { + currMappings, err := t.RESTMappings(gk, versions...) + // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + errors = append(errors, err) + continue + } + allMappings = append(allMappings, currMappings...) + } + if len(errors) > 0 { + return nil, utilerrors.NewAggregate(errors) + } + if len(allMappings) == 0 { + return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + } + return allMappings, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go new file mode 100644 index 000000000..2a14aa7ab --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go @@ -0,0 +1,222 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + AnyGroup = "*" + AnyVersion = "*" + AnyResource = "*" + AnyKind = "*" +) + +// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind +// when multiple matches are possible +type PriorityRESTMapper struct { + // Delegate is the RESTMapper to use to locate all the Kind and Resource matches + Delegate RESTMapper + + // ResourcePriority is a list of priority patterns to apply to matching resources. + // The list of all matching resources is narrowed based on the patterns until only one remains. + // A pattern with no matches is skipped. A pattern with more than one match uses its + // matches as the list to continue matching against. + ResourcePriority []schema.GroupVersionResource + + // KindPriority is a list of priority patterns to apply to matching kinds. + // The list of all matching kinds is narrowed based on the patterns until only one remains. + // A pattern with no matches is skipped. A pattern with more than one match uses its + // matches as the list to continue matching against. 
+ KindPriority []schema.GroupVersionKind +} + +func (m PriorityRESTMapper) String() string { + return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate) +} + +// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit. +func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + originalGVRs, err := m.Delegate.ResourcesFor(partiallySpecifiedResource) + if err != nil { + return schema.GroupVersionResource{}, err + } + if len(originalGVRs) == 1 { + return originalGVRs[0], nil + } + + remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...) + for _, pattern := range m.ResourcePriority { + matchedGVRs := []schema.GroupVersionResource{} + for _, gvr := range remainingGVRs { + if resourceMatches(pattern, gvr) { + matchedGVRs = append(matchedGVRs, gvr) + } + } + + switch len(matchedGVRs) { + case 0: + // if you have no matches, then nothing matched this pattern just move to the next + continue + case 1: + // one match, return + return matchedGVRs[0], nil + default: + // more than one match, use the matched hits as the list moving to the next pattern. + // this way you can have a series of selection criteria + remainingGVRs = matchedGVRs + } + } + + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs} +} + +// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit. +func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + originalGVKs, err := m.Delegate.KindsFor(partiallySpecifiedResource) + if err != nil { + return schema.GroupVersionKind{}, err + } + if len(originalGVKs) == 1 { + return originalGVKs[0], nil + } + + remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...) + for _, pattern := range m.KindPriority { + matchedGVKs := []schema.GroupVersionKind{} + for _, gvr := range remainingGVKs { + if kindMatches(pattern, gvr) { + matchedGVKs = append(matchedGVKs, gvr) + } + } + + switch len(matchedGVKs) { + case 0: + // if you have no matches, then nothing matched this pattern just move to the next + continue + case 1: + // one match, return + return matchedGVKs[0], nil + default: + // more than one match, use the matched hits as the list moving to the next pattern. 
+ // this way you can have a series of selection criteria + remainingGVKs = matchedGVKs + } + } + + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs} +} + +func resourceMatches(pattern schema.GroupVersionResource, resource schema.GroupVersionResource) bool { + if pattern.Group != AnyGroup && pattern.Group != resource.Group { + return false + } + if pattern.Version != AnyVersion && pattern.Version != resource.Version { + return false + } + if pattern.Resource != AnyResource && pattern.Resource != resource.Resource { + return false + } + + return true +} + +func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) bool { + if pattern.Group != AnyGroup && pattern.Group != kind.Group { + return false + } + if pattern.Version != AnyVersion && pattern.Version != kind.Version { + return false + } + if pattern.Kind != AnyKind && pattern.Kind != kind.Kind { + return false + } + + return true +} + +func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) { + mappings, err := m.Delegate.RESTMappings(gk) + if err != nil { + return nil, err + } + + // any versions the user provides take priority + priorities := m.KindPriority + if len(versions) > 0 { + priorities = make([]schema.GroupVersionKind, 0, len(m.KindPriority)+len(versions)) + for _, version := range versions { + gv := schema.GroupVersion{ + Version: version, + Group: gk.Group, + } + priorities = append(priorities, gv.WithKind(AnyKind)) + } + priorities = append(priorities, m.KindPriority...) + } + + remaining := append([]*RESTMapping{}, mappings...) + for _, pattern := range priorities { + var matching []*RESTMapping + for _, m := range remaining { + if kindMatches(pattern, m.GroupVersionKind) { + matching = append(matching, m) + } + } + + switch len(matching) { + case 0: + // if you have no matches, then nothing matched this pattern just move to the next + continue + case 1: + // one match, return + return matching[0], nil + default: + // more than one match, use the matched hits as the list moving to the next pattern. + // this way you can have a series of selection criteria + remaining = matching + } + } + if len(remaining) == 1 { + return remaining[0], nil + } + + var kinds []schema.GroupVersionKind + for _, m := range mappings { + kinds = append(kinds, m.GroupVersionKind) + } + return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} +} + +func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + return m.Delegate.RESTMappings(gk, versions...) +} + +func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + return m.Delegate.ResourceSingularizer(resource) +} + +func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return m.Delegate.ResourcesFor(partiallySpecifiedResource) +} + +func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { + return m.Delegate.KindsFor(partiallySpecifiedResource) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go new file mode 100644 index 000000000..c6b2597dd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -0,0 +1,545 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: move everything in this file to pkg/api/rest +package meta + +import ( + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Implements RESTScope interface +type restScope struct { + name RESTScopeName + paramName string + argumentName string + paramDescription string +} + +func (r *restScope) Name() RESTScopeName { + return r.name +} +func (r *restScope) ParamName() string { + return r.paramName +} +func (r *restScope) ArgumentName() string { + return r.argumentName +} +func (r *restScope) ParamDescription() string { + return r.paramDescription +} + +var RESTScopeNamespace = &restScope{ + name: RESTScopeNameNamespace, + paramName: "namespaces", + argumentName: "namespace", + paramDescription: "object name and auth scope, such as for teams and projects", +} + +var RESTScopeRoot = &restScope{ + name: RESTScopeNameRoot, +} + +// DefaultRESTMapper exposes mappings between the types defined in a +// runtime.Scheme. It assumes that all types defined the provided scheme +// can be mapped with the provided MetadataAccessor and Codec interfaces. +// +// The resource name of a Kind is defined as the lowercase, +// English-plural version of the Kind string. +// When converting from resource to Kind, the singular version of the +// resource name is also accepted for convenience. +// +// TODO: Only accept plural for some operations for increased control? +// (`get pod bar` vs `get pods bar`) +type DefaultRESTMapper struct { + defaultGroupVersions []schema.GroupVersion + + resourceToKind map[schema.GroupVersionResource]schema.GroupVersionKind + kindToPluralResource map[schema.GroupVersionKind]schema.GroupVersionResource + kindToScope map[schema.GroupVersionKind]RESTScope + singularToPlural map[schema.GroupVersionResource]schema.GroupVersionResource + pluralToSingular map[schema.GroupVersionResource]schema.GroupVersionResource + + interfacesFunc VersionInterfacesFunc +} + +func (m *DefaultRESTMapper) String() string { + return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) +} + +var _ RESTMapper = &DefaultRESTMapper{} + +// VersionInterfacesFunc returns the appropriate typer, and metadata accessor for a +// given api version, or an error if no such api version exists. +type VersionInterfacesFunc func(version schema.GroupVersion) (*VersionInterfaces, error) + +// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion +// to a resource name and back based on the objects in a runtime.Scheme +// and the Kubernetes API conventions. Takes a group name, a priority list of the versions +// to search when an object has no default version (set empty to return an error), +// and a function that retrieves the correct metadata for a given version. 
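An illustrative aside, not part of the vendored file: a small sketch of constructing the mapper described above and resolving a kind to its plural resource and scope. The group and kind are invented; InterfacesForUnstructured comes from unstructured.go later in this patch.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    gv := schema.GroupVersion{Group: "example.io", Version: "v1"}

    mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}, meta.InterfacesForUnstructured)
    mapper.Add(gv.WithKind("Widget"), meta.RESTScopeNamespace)

    // Kind -> plural resource name ("widgets"), REST scope, and conversion interfaces.
    mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "example.io", Kind: "Widget"})
    if err != nil {
        panic(err)
    }
    fmt.Println(mapping.Resource, mapping.GroupVersionKind, mapping.Scope.Name())
}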
+func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion, f VersionInterfacesFunc) *DefaultRESTMapper { + resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind) + kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource) + kindToScope := make(map[schema.GroupVersionKind]RESTScope) + singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource) + pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource) + // TODO: verify name mappings work correctly when versions differ + + return &DefaultRESTMapper{ + resourceToKind: resourceToKind, + kindToPluralResource: kindToPluralResource, + kindToScope: kindToScope, + defaultGroupVersions: defaultGroupVersions, + singularToPlural: singularToPlural, + pluralToSingular: pluralToSingular, + interfacesFunc: f, + } +} + +func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) { + plural, singular := UnsafeGuessKindToResource(kind) + + m.singularToPlural[singular] = plural + m.pluralToSingular[plural] = singular + + m.resourceToKind[singular] = kind + m.resourceToKind[plural] = kind + + m.kindToPluralResource[kind] = plural + m.kindToScope[kind] = scope +} + +// unpluralizedSuffixes is a list of resource suffixes that are the same plural and singular +// This is only is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should. +// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all +// callers to use the RESTMapper they mean. +var unpluralizedSuffixes = []string{ + "endpoints", +} + +// UnsafeGuessKindToResource converts Kind to a resource name. +// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match +// and they aren't guaranteed to do so. 
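An illustrative aside, not part of the vendored file: what the pluralization guess documented above (and defined next) produces for a few kinds. The group/version is arbitrary.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    gv := schema.GroupVersion{Group: "example.io", Version: "v1"}
    for _, kind := range []string{"Pod", "Ingress", "NetworkPolicy", "Endpoints"} {
        plural, singular := meta.UnsafeGuessKindToResource(gv.WithKind(kind))
        fmt.Printf("%s -> plural=%s singular=%s\n", kind, plural.Resource, singular.Resource)
    }
    // Expected: pods/pod, ingresses/ingress, networkpolicies/networkpolicy, and
    // endpoints/endpoints (covered by the unpluralizedSuffixes list above).
}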
+func UnsafeGuessKindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) { + kindName := kind.Kind + if len(kindName) == 0 { + return schema.GroupVersionResource{}, schema.GroupVersionResource{} + } + singularName := strings.ToLower(kindName) + singular := kind.GroupVersion().WithResource(singularName) + + for _, skip := range unpluralizedSuffixes { + if strings.HasSuffix(singularName, skip) { + return singular, singular + } + } + + switch string(singularName[len(singularName)-1]) { + case "s": + return kind.GroupVersion().WithResource(singularName + "es"), singular + case "y": + return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular + } + + return kind.GroupVersion().WithResource(singularName + "s"), singular +} + +// ResourceSingularizer implements RESTMapper +// It converts a resource name from plural to singular (e.g., from pods to pod) +func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) { + partialResource := schema.GroupVersionResource{Resource: resourceType} + resources, err := m.ResourcesFor(partialResource) + if err != nil { + return resourceType, err + } + + singular := schema.GroupVersionResource{} + for _, curr := range resources { + currSingular, ok := m.pluralToSingular[curr] + if !ok { + continue + } + if singular.Empty() { + singular = currSingular + continue + } + + if currSingular.Resource != singular.Resource { + return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType) + } + } + + if singular.Empty() { + return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType) + } + + return singular.Resource, nil +} + +// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior) +func coerceResourceForMatching(resource schema.GroupVersionResource) schema.GroupVersionResource { + resource.Resource = strings.ToLower(resource.Resource) + if resource.Version == runtime.APIVersionInternal { + resource.Version = "" + } + + return resource +} + +func (m *DefaultRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + resource := coerceResourceForMatching(input) + + hasResource := len(resource.Resource) > 0 + hasGroup := len(resource.Group) > 0 + hasVersion := len(resource.Version) > 0 + + if !hasResource { + return nil, fmt.Errorf("a resource must be present, got: %v", resource) + } + + ret := []schema.GroupVersionResource{} + switch { + case hasGroup && hasVersion: + // fully qualified. Find the exact match + for plural, singular := range m.pluralToSingular { + if singular == resource { + ret = append(ret, plural) + break + } + if plural == resource { + ret = append(ret, plural) + break + } + } + + case hasGroup: + // given a group, prefer an exact match. If you don't find one, resort to a prefix match on group + foundExactMatch := false + requestedGroupResource := resource.GroupResource() + for plural, singular := range m.pluralToSingular { + if singular.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, plural) + } + if plural.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, plural) + } + } + + // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match + // storageclass.storage.k8s.io + if !foundExactMatch { + for plural, singular := range m.pluralToSingular { + if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) { + continue + } + if singular.Resource == requestedGroupResource.Resource { + ret = append(ret, plural) + } + if plural.Resource == requestedGroupResource.Resource { + ret = append(ret, plural) + } + } + + } + + case hasVersion: + for plural, singular := range m.pluralToSingular { + if singular.Version == resource.Version && singular.Resource == resource.Resource { + ret = append(ret, plural) + } + if plural.Version == resource.Version && plural.Resource == resource.Resource { + ret = append(ret, plural) + } + } + + default: + for plural, singular := range m.pluralToSingular { + if singular.Resource == resource.Resource { + ret = append(ret, plural) + } + if plural.Resource == resource.Resource { + ret = append(ret, plural) + } + } + } + + if len(ret) == 0 { + return nil, &NoResourceMatchError{PartialResource: resource} + } + + sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions}) + return ret, nil +} + +func (m *DefaultRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + resources, err := m.ResourcesFor(resource) + if err != nil { + return schema.GroupVersionResource{}, err + } + if len(resources) == 1 { + return resources[0], nil + } + + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} +} + +func (m *DefaultRESTMapper) KindsFor(input schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + resource := coerceResourceForMatching(input) + + hasResource := len(resource.Resource) > 0 + hasGroup := len(resource.Group) > 0 + hasVersion := len(resource.Version) > 0 + + if !hasResource { + return nil, fmt.Errorf("a resource must be present, got: %v", resource) + } + + ret := []schema.GroupVersionKind{} + switch { + // fully qualified. Find the exact match + case hasGroup && hasVersion: + kind, exists := m.resourceToKind[resource] + if exists { + ret = append(ret, kind) + } + + case hasGroup: + foundExactMatch := false + requestedGroupResource := resource.GroupResource() + for currResource, currKind := range m.resourceToKind { + if currResource.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, currKind) + } + } + + // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match + // storageclass.storage.k8s.io + if !foundExactMatch { + for currResource, currKind := range m.resourceToKind { + if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) { + continue + } + if currResource.Resource == requestedGroupResource.Resource { + ret = append(ret, currKind) + } + } + + } + + case hasVersion: + for currResource, currKind := range m.resourceToKind { + if currResource.Version == resource.Version && currResource.Resource == resource.Resource { + ret = append(ret, currKind) + } + } + + default: + for currResource, currKind := range m.resourceToKind { + if currResource.Resource == resource.Resource { + ret = append(ret, currKind) + } + } + } + + if len(ret) == 0 { + return nil, &NoResourceMatchError{PartialResource: input} + } + + sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions}) + return ret, nil +} + +func (m *DefaultRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + kinds, err := m.KindsFor(resource) + if err != nil { + return schema.GroupVersionKind{}, err + } + if len(kinds) == 1 { + return kinds[0], nil + } + + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} +} + +type kindByPreferredGroupVersion struct { + list []schema.GroupVersionKind + sortOrder []schema.GroupVersion +} + +func (o kindByPreferredGroupVersion) Len() int { return len(o.list) } +func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } +func (o kindByPreferredGroupVersion) Less(i, j int) bool { + lhs := o.list[i] + rhs := o.list[j] + if lhs == rhs { + return false + } + + if lhs.GroupVersion() == rhs.GroupVersion() { + return lhs.Kind < rhs.Kind + } + + // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order + lhsIndex := -1 + rhsIndex := -1 + + for i := range o.sortOrder { + if o.sortOrder[i] == lhs.GroupVersion() { + lhsIndex = i + } + if o.sortOrder[i] == rhs.GroupVersion() { + rhsIndex = i + } + } + + if rhsIndex == -1 { + return true + } + + return lhsIndex < rhsIndex +} + +type resourceByPreferredGroupVersion struct { + list []schema.GroupVersionResource + sortOrder []schema.GroupVersion +} + +func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) } +func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } +func (o resourceByPreferredGroupVersion) Less(i, j int) bool { + lhs := o.list[i] + rhs := o.list[j] + if lhs == rhs { + return false + } + + if lhs.GroupVersion() == rhs.GroupVersion() { + return lhs.Resource < rhs.Resource + } + + // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order + lhsIndex := -1 + rhsIndex := -1 + + for i := range o.sortOrder { + if o.sortOrder[i] == lhs.GroupVersion() { + lhsIndex = i + } + if o.sortOrder[i] == rhs.GroupVersion() { + rhsIndex = i + } + } + + if rhsIndex == -1 { + return true + } + + return lhsIndex < rhsIndex +} + +// RESTMapping returns a struct representing the resource path and conversion interfaces a +// RESTClient should use to operate on the provided group/kind in order of versions. If a version search +// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which +// version should be used to access the named group/kind. 
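An illustrative aside, not part of the vendored file: a sketch of how the version search order described above affects the returned mapping. The group, versions, and kind are invented; errors are elided.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    v1 := schema.GroupVersion{Group: "example.io", Version: "v1"}
    v2 := schema.GroupVersion{Group: "example.io", Version: "v2"}

    // v1 is listed first, so it is the preferred default for this group.
    mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{v1, v2}, meta.InterfacesForUnstructured)
    mapper.Add(v1.WithKind("Widget"), meta.RESTScopeNamespace)
    mapper.Add(v2.WithKind("Widget"), meta.RESTScopeNamespace)

    gk := schema.GroupKind{Group: "example.io", Kind: "Widget"}

    m1, _ := mapper.RESTMapping(gk)       // no versions given: falls back to v1
    m2, _ := mapper.RESTMapping(gk, "v2") // an explicit, known version wins
    fmt.Println(m1.GroupVersionKind.Version, m2.GroupVersionKind.Version) // v1 v2
}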
+func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + mappings, err := m.RESTMappings(gk, versions...) + if err != nil { + return nil, err + } + if len(mappings) == 0 { + return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + } + // since we rely on RESTMappings method + // take the first match and return to the caller + // as this was the existing behavior. + return mappings[0], nil +} + +// RESTMappings returns the RESTMappings for the provided group kind. If a version search order +// is not provided, the search order provided to DefaultRESTMapper will be used. +func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + mappings := make([]*RESTMapping, 0) + potentialGVK := make([]schema.GroupVersionKind, 0) + hadVersion := false + + // Pick an appropriate version + for _, version := range versions { + if len(version) == 0 || version == runtime.APIVersionInternal { + continue + } + currGVK := gk.WithVersion(version) + hadVersion = true + if _, ok := m.kindToPluralResource[currGVK]; ok { + potentialGVK = append(potentialGVK, currGVK) + break + } + } + // Use the default preferred versions + if !hadVersion && len(potentialGVK) == 0 { + for _, gv := range m.defaultGroupVersions { + if gv.Group != gk.Group { + continue + } + potentialGVK = append(potentialGVK, gk.WithVersion(gv.Version)) + } + } + + if len(potentialGVK) == 0 { + return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + } + + for _, gvk := range potentialGVK { + //Ensure we have a REST mapping + res, ok := m.kindToPluralResource[gvk] + if !ok { + continue + } + + // Ensure we have a REST scope + scope, ok := m.kindToScope[gvk] + if !ok { + return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind) + } + + interfaces, err := m.interfacesFunc(gvk.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("the provided version %q has no relevant versions: %v", gvk.GroupVersion().String(), err) + } + + mappings = append(mappings, &RESTMapping{ + Resource: res.Resource, + GroupVersionKind: gvk, + Scope: scope, + + ObjectConvertor: interfaces.ObjectConvertor, + MetadataAccessor: interfaces.MetadataAccessor, + }) + } + + if len(mappings) == 0 { + return nil, &NoResourceMatchError{PartialResource: schema.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}} + } + return mappings, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go new file mode 100644 index 000000000..3ebf24815 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package meta + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// InterfacesForUnstructured returns VersionInterfaces suitable for +// dealing with unstructured.Unstructured objects. +func InterfacesForUnstructured(schema.GroupVersion) (*VersionInterfaces, error) { + return &VersionInterfaces{ + ObjectConvertor: &unstructured.UnstructuredObjectConverter{}, + MetadataAccessor: NewAccessor(), + }, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go new file mode 100644 index 000000000..a8866a43e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -0,0 +1,299 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math/big" + "strconv" + + inf "gopkg.in/inf.v0" +) + +// Scale is used for getting and setting the base-10 scaled value. +// Base-2 scales are omitted for mathematical simplicity. +// See Quantity.ScaledValue for more details. +type Scale int32 + +// infScale adapts a Scale value to an inf.Scale value. +func (s Scale) infScale() inf.Scale { + return inf.Scale(-s) // inf.Scale is upside-down +} + +const ( + Nano Scale = -9 + Micro Scale = -6 + Milli Scale = -3 + Kilo Scale = 3 + Mega Scale = 6 + Giga Scale = 9 + Tera Scale = 12 + Peta Scale = 15 + Exa Scale = 18 +) + +var ( + Zero = int64Amount{} + + // Used by quantity strings - treat as read only + zeroBytes = []byte("0") +) + +// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster +// than operations on inf.Dec for values that can be represented as int64. +// +k8s:openapi-gen=true +type int64Amount struct { + value int64 + scale Scale +} + +// Sign returns 0 if the value is zero, -1 if it is less than 0, or 1 if it is greater than 0. +func (a int64Amount) Sign() int { + switch { + case a.value == 0: + return 0 + case a.value > 0: + return 1 + default: + return -1 + } +} + +// AsInt64 returns the current amount as an int64 at scale 0, or false if the value cannot be +// represented in an int64 OR would result in a loss of precision. This method is intended as +// an optimization to avoid calling AsDec. +func (a int64Amount) AsInt64() (int64, bool) { + if a.scale == 0 { + return a.value, true + } + if a.scale < 0 { + // TODO: attempt to reduce factors, although it is assumed that factors are reduced prior + // to the int64Amount being created. + return 0, false + } + return positiveScaleInt64(a.value, a.scale) +} + +// AsScaledInt64 returns an int64 representing the value of this amount at the specified scale, +// rounding up, or false if that would result in overflow. (1e20).AsScaledInt64(1) would result +// in overflow because 1e19 is not representable as an int64. Note that setting a scale larger +// than the current value may result in loss of precision - i.e. (1e-6).AsScaledInt64(0) would +// return 1, because 0.000001 is rounded up to 1. 
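An illustrative aside, not part of the vendored file: a sketch of reading a quantity at different base-10 scales, assuming the public MustParse and value accessors implemented elsewhere in this vendored package.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    q := resource.MustParse("1500m") // 1.5, held as value 1500 at scale Milli

    fmt.Println(q.MilliValue())                // 1500
    fmt.Println(q.ScaledValue(resource.Milli)) // 1500
    fmt.Println(q.ScaledValue(resource.Micro)) // 1500000
    fmt.Println(q.ScaledValue(resource.Kilo))  // 1 (rounded up; see the rounding helpers in math.go below)
}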
+func (a int64Amount) AsScaledInt64(scale Scale) (result int64, ok bool) { + if a.scale < scale { + result, _ = negativeScaleInt64(a.value, scale-a.scale) + return result, true + } + return positiveScaleInt64(a.value, a.scale-scale) +} + +// AsDec returns an inf.Dec representation of this value. +func (a int64Amount) AsDec() *inf.Dec { + var base inf.Dec + base.SetUnscaled(a.value) + base.SetScale(inf.Scale(-a.scale)) + return &base +} + +// Cmp returns 0 if a and b are equal, 1 if a is greater than b, or -1 if a is less than b. +func (a int64Amount) Cmp(b int64Amount) int { + switch { + case a.scale == b.scale: + // compare only the unscaled portion + case a.scale > b.scale: + result, remainder, exact := divideByScaleInt64(b.value, a.scale-b.scale) + if !exact { + return a.AsDec().Cmp(b.AsDec()) + } + if result == a.value { + switch { + case remainder == 0: + return 0 + case remainder > 0: + return -1 + default: + return 1 + } + } + b.value = result + default: + result, remainder, exact := divideByScaleInt64(a.value, b.scale-a.scale) + if !exact { + return a.AsDec().Cmp(b.AsDec()) + } + if result == b.value { + switch { + case remainder == 0: + return 0 + case remainder > 0: + return 1 + default: + return -1 + } + } + a.value = result + } + + switch { + case a.value == b.value: + return 0 + case a.value < b.value: + return -1 + default: + return 1 + } +} + +// Add adds two int64Amounts together, matching scales. It will return false and not mutate +// a if overflow or underflow would result. +func (a *int64Amount) Add(b int64Amount) bool { + switch { + case b.value == 0: + return true + case a.value == 0: + a.value = b.value + a.scale = b.scale + return true + case a.scale == b.scale: + c, ok := int64Add(a.value, b.value) + if !ok { + return false + } + a.value = c + case a.scale > b.scale: + c, ok := positiveScaleInt64(a.value, a.scale-b.scale) + if !ok { + return false + } + c, ok = int64Add(c, b.value) + if !ok { + return false + } + a.scale = b.scale + a.value = c + default: + c, ok := positiveScaleInt64(b.value, b.scale-a.scale) + if !ok { + return false + } + c, ok = int64Add(a.value, c) + if !ok { + return false + } + a.value = c + } + return true +} + +// Sub removes the value of b from the current amount, or returns false if underflow would result. +func (a *int64Amount) Sub(b int64Amount) bool { + return a.Add(int64Amount{value: -b.value, scale: b.scale}) +} + +// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision +// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. +func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { + if a.scale >= scale { + return a, true + } + result, exact := negativeScaleInt64(a.value, scale-a.scale) + return int64Amount{value: result, scale: scale}, exact +} + +// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted +// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3. 
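An illustrative aside, not part of the vendored file: the canonical-form rules described above are what produce the documented serializations; a short sketch using the package's public API (the expected strings mirror the Quantity documentation in this patch).

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // Decimal values are re-expressed so that no fractional digits are emitted.
    fmt.Println(resource.MustParse("1.5").String()) // "1500m"
    // Binary-SI values keep their suffix family.
    fmt.Println(resource.MustParse("1.5Gi").String()) // "1536Mi"
}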
+func (a int64Amount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + mantissa := a.value + exponent = int32(a.scale) + + amount, times := removeInt64Factors(mantissa, 10) + exponent += int32(times) + + // make sure exponent is a multiple of 3 + var ok bool + switch exponent % 3 { + case 1, -2: + amount, ok = int64MultiplyScale10(amount) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBytes(out) + } + exponent = exponent - 1 + case 2, -1: + amount, ok = int64MultiplyScale100(amount) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBytes(out) + } + exponent = exponent - 2 + } + return strconv.AppendInt(out, amount, 10), exponent +} + +// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would +// return []byte("2048"), 1. +func (a int64Amount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) { + value, ok := a.AsScaledInt64(0) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBase1024Bytes(out) + } + amount, exponent := removeInt64Factors(value, 1024) + return strconv.AppendInt(out, amount, 10), exponent +} + +// infDecAmount implements common operations over an inf.Dec that are specific to the quantity +// representation. +type infDecAmount struct { + *inf.Dec +} + +// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision +// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. +func (a infDecAmount) AsScale(scale Scale) (infDecAmount, bool) { + tmp := &inf.Dec{} + tmp.Round(a.Dec, scale.infScale(), inf.RoundUp) + return infDecAmount{tmp}, tmp.Cmp(a.Dec) == 0 +} + +// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted +// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3. +func (a infDecAmount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + mantissa := a.Dec.UnscaledBig() + exponent = int32(-a.Dec.Scale()) + amount := big.NewInt(0).Set(mantissa) + // move all factors of 10 into the exponent for easy reasoning + amount, times := removeBigIntFactors(amount, bigTen) + exponent += times + + // make sure exponent is a multiple of 3 + for exponent%3 != 0 { + amount.Mul(amount, bigTen) + exponent-- + } + + return append(out, amount.String()...), exponent +} + +// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would +// return []byte("2048"), 1. +func (a infDecAmount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) { + tmp := &inf.Dec{} + tmp.Round(a.Dec, 0, inf.RoundUp) + amount, exponent := removeBigIntFactors(tmp.UnscaledBig(), big1024) + return append(out, amount.String()...), exponent +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go new file mode 100644 index 000000000..8b2e338a7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +// DO NOT EDIT! + +/* + Package resource is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto + + It has these top-level messages: + Quantity +*/ +package resource + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func init() { + proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 255 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x8f, 0xa1, 0x4e, 0x03, 0x41, + 0x10, 0x86, 0x77, 0x0d, 0x29, 0x95, 0x0d, 0x21, 0xa4, 0x62, 0xaf, 0x21, 0x08, 0x0c, 0x3b, 0x02, + 0xd3, 0x20, 0xf1, 0x08, 0x90, 0xb8, 0xbb, 0xeb, 0xb0, 0xdd, 0x1c, 0xdd, 0xbd, 0xcc, 0xce, 0x92, + 0xd4, 0x55, 0x22, 0x2b, 0x91, 0xbd, 0xb7, 0xa9, 0xac, 0xac, 0x40, 0x70, 0xcb, 0x8b, 0x90, 0x5e, + 0xdb, 0x84, 0x90, 0xe0, 0xe6, 0xfb, 0x27, 0xdf, 0xe4, 0x9f, 0xfe, 0x43, 0x35, 0x0e, 0xda, 0x7a, + 0xa8, 0x62, 0x81, 0xe4, 0x90, 0x31, 0xc0, 0x1b, 0xba, 0x89, 0x27, 0x38, 0x2c, 0xf2, 0xda, 0xce, + 0xf2, 0x72, 0x6a, 0x1d, 0xd2, 0x1c, 0xea, 0xca, 0xec, 0x02, 0x20, 0x0c, 0x3e, 0x52, 0x89, 0x60, + 0xd0, 0x21, 0xe5, 0x8c, 0x13, 0x5d, 0x93, 0x67, 0x3f, 0xb8, 0xda, 0x5b, 0xfa, 0xb7, 0xa5, 0xeb, + 0xca, 0xec, 0x02, 0x7d, 0xb4, 0x86, 0x37, 0xc6, 0xf2, 0x34, 0x16, 0xba, 0xf4, 0x33, 0x30, 0xde, + 0x78, 0xe8, 0xe4, 0x22, 0xbe, 0x74, 0xd4, 0x41, 0x37, 0xed, 0x8f, 0x0e, 0x6f, 0xff, 0xab, 0x12, + 0xd9, 0xbe, 0x82, 0x75, 0x1c, 0x98, 0xfe, 0x36, 0xb9, 0x1c, 0xf7, 0x7b, 0x8f, 0x31, 0x77, 0x6c, + 0x79, 0x3e, 0x38, 0xef, 0x9f, 0x04, 0x26, 0xeb, 0xcc, 0x85, 0x1c, 0xc9, 0xeb, 0xd3, 0xa7, 0x03, + 0xdd, 0x9d, 0x7d, 0xac, 0x32, 0xf1, 0xde, 0x64, 0x62, 0xd9, 0x64, 0x62, 0xd5, 0x64, 0x62, 0xf1, + 0x39, 0x12, 0xf7, 0x7a, 0xdd, 0x2a, 0xb1, 0x69, 0x95, 0xd8, 0xb6, 0x4a, 0x2c, 0x92, 0x92, 0xeb, + 0xa4, 0xe4, 0x26, 0x29, 0xb9, 0x4d, 0x4a, 0x7e, 0x25, 0x25, 0x97, 0xdf, 0x4a, 0x3c, 0xf7, 0x8e, + 0xdf, 0xfc, 0x04, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x5e, 0xda, 0xf9, 
0x43, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
new file mode 100644
index 000000000..608299da4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
@@ -0,0 +1,94 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.api.resource;
+
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "resource";
+
+// Quantity is a fixed-point representation of a number.
+// It provides convenient marshaling/unmarshaling in JSON and YAML,
+// in addition to String() and Int64() accessors.
+//
+// The serialization format is:
+//
+// <quantity>        ::= <signedNumber><suffix>
+//   (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
+// <digit>           ::= 0 | 1 | ... | 9
+// <digits>          ::= <digit> | <digit><digits>
+// <number>          ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign>            ::= "+" | "-"
+// <signedNumber>    ::= <number> | <sign><number>
+// <suffix>          ::= <binarySI> | <decimalExponent> | <decimalSI>
+// <binarySI>        ::= Ki | Mi | Gi | Ti | Pi | Ei
+//   (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
+// <decimalSI>       ::= m | "" | k | M | G | T | P | E
+//   (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
+// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
+//
+// No matter which of the three exponent forms is used, no quantity may represent
+// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
+// places. Numbers larger or more precise will be capped or rounded up.
+// (E.g.: 0.1m will rounded up to 1m.)
+// This may be extended in the future if we require larger or smaller quantities.
+//
+// When a Quantity is parsed from a string, it will remember the type of suffix
+// it had, and will use the same type again when it is serialized.
+//
+// Before serializing, Quantity will be put in "canonical form".
+// This means that Exponent/suffix will be adjusted up or down (with a
+// corresponding increase or decrease in Mantissa) such that:
+// a. No precision is lost
+// b. No fractional digits will be emitted
+// c. The exponent (or suffix) is as large as possible.
+// The sign will be omitted unless the number is negative.
+//
+// Examples:
+// 1.5 will be serialized as "1500m"
+// 1.5Gi will be serialized as "1536Mi"
+//
+// NOTE: We reserve the right to amend this canonical format, perhaps to
+// allow 1.5 to be canonical.
+// TODO: Remove above disclaimer after all bikeshedding about format is over,
+// or after March 2015.
+//
+// Note that the quantity will NEVER be internally represented by a
+// floating point number. That is the whole point of this exercise.
+//
+// Non-canonical values will still parse as long as they are well formed,
+// but will be re-emitted in their canonical form. (So always use canonical
+// form, or don't diff.)
+// +// This format is intended to make it difficult to use these numbers without +// writing some sort of special handling code in the hopes that that will +// cause implementors to also use a fixed point implementation. +// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +message Quantity { + optional string string = 1; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go new file mode 100644 index 000000000..72d3880c0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -0,0 +1,314 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math/big" + + inf "gopkg.in/inf.v0" +) + +const ( + // maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64. + // It is also the maximum decimal digits that can be represented with an int64. + maxInt64Factors = 18 +) + +var ( + // Commonly needed big.Int values-- treat as read only! + bigTen = big.NewInt(10) + bigZero = big.NewInt(0) + bigOne = big.NewInt(1) + bigThousand = big.NewInt(1000) + big1024 = big.NewInt(1024) + + // Commonly needed inf.Dec values-- treat as read only! + decZero = inf.NewDec(0, 0) + decOne = inf.NewDec(1, 0) + decMinusOne = inf.NewDec(-1, 0) + decThousand = inf.NewDec(1000, 0) + dec1024 = inf.NewDec(1024, 0) + decMinus1024 = inf.NewDec(-1024, 0) + + // Largest (in magnitude) number allowed. + maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64 + + // The maximum value we can represent milli-units for. + // Compare with the return value of Quantity.Value() to + // see if it's safe to use Quantity.MilliValue(). + MaxMilliValue = int64(((1 << 63) - 1) / 1000) +) + +const mostNegative = -(mostPositive + 1) +const mostPositive = 1<<63 - 1 + +// int64Add returns a+b, or false if that would overflow int64. +func int64Add(a, b int64) (int64, bool) { + c := a + b + switch { + case a > 0 && b > 0: + if c < 0 { + return 0, false + } + case a < 0 && b < 0: + if c > 0 { + return 0, false + } + if a == mostNegative && b == mostNegative { + return 0, false + } + } + return c, true +} + +// int64Multiply returns a*b, or false if that would overflow or underflow int64. +func int64Multiply(a, b int64) (int64, bool) { + if a == 0 || b == 0 || a == 1 || b == 1 { + return a * b, true + } + if a == mostNegative || b == mostNegative { + return 0, false + } + c := a * b + return c, c/b == a +} + +// int64MultiplyScale returns a*b, assuming b is greater than one, or false if that would overflow or underflow int64. +// Use when b is known to be greater than one. 
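An illustrative aside, not part of the vendored file: the multiply helpers here are unexported, so the snippet below is a standalone restatement of the same multiply-then-divide-back overflow guard, not the package API.

package main

import "fmt"

const mostNegativeInt64 = -1 << 63

// checkedMul mirrors the int64Multiply pattern above: multiply, then divide
// back and compare; a mismatch means the product wrapped around.
func checkedMul(a, b int64) (int64, bool) {
    if a == 0 || b == 0 || a == 1 || b == 1 {
        return a * b, true
    }
    if a == mostNegativeInt64 || b == mostNegativeInt64 {
        return 0, false
    }
    c := a * b
    return c, c/b == a
}

func main() {
    fmt.Println(checkedMul(1<<40, 1<<10)) // 1125899906842624 true
    fmt.Println(checkedMul(1<<40, 1<<40)) // overflow detected: second value is false
}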
+func int64MultiplyScale(a int64, b int64) (int64, bool) { + if a == 0 || a == 1 { + return a * b, true + } + if a == mostNegative && b != 1 { + return 0, false + } + c := a * b + return c, c/b == a +} + +// int64MultiplyScale10 multiplies a by 10, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 10) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale10(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 10, true + } + if a == mostNegative { + return 0, false + } + c := a * 10 + return c, c/10 == a +} + +// int64MultiplyScale100 multiplies a by 100, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 100) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale100(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 100, true + } + if a == mostNegative { + return 0, false + } + c := a * 100 + return c, c/100 == a +} + +// int64MultiplyScale1000 multiplies a by 1000, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 1000) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale1000(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 1000, true + } + if a == mostNegative { + return 0, false + } + c := a * 1000 + return c, c/1000 == a +} + +// positiveScaleInt64 multiplies base by 10^scale, returning false if the +// value overflows. Passing a negative scale is undefined. +func positiveScaleInt64(base int64, scale Scale) (int64, bool) { + switch scale { + case 0: + return base, true + case 1: + return int64MultiplyScale10(base) + case 2: + return int64MultiplyScale100(base) + case 3: + return int64MultiplyScale1000(base) + case 6: + return int64MultiplyScale(base, 1000000) + case 9: + return int64MultiplyScale(base, 1000000000) + default: + value := base + var ok bool + for i := Scale(0); i < scale; i++ { + if value, ok = int64MultiplyScale(value, 10); !ok { + return 0, false + } + } + return value, true + } +} + +// negativeScaleInt64 reduces base by the provided scale, rounding up, until the +// value is zero or the scale is reached. Passing a negative scale is undefined. +// The value returned, if not exact, is rounded away from zero. 
+func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) { + if scale == 0 { + return base, true + } + + value := base + var fraction bool + for i := Scale(0); i < scale; i++ { + if !fraction && value%10 != 0 { + fraction = true + } + value = value / 10 + if value == 0 { + if fraction { + if base > 0 { + return 1, false + } + return -1, false + } + return 0, true + } + } + if fraction { + if base > 0 { + value += 1 + } else { + value += -1 + } + } + return value, !fraction +} + +func pow10Int64(b int64) int64 { + switch b { + case 0: + return 1 + case 1: + return 10 + case 2: + return 100 + case 3: + return 1000 + case 4: + return 10000 + case 5: + return 100000 + case 6: + return 1000000 + case 7: + return 10000000 + case 8: + return 100000000 + case 9: + return 1000000000 + case 10: + return 10000000000 + case 11: + return 100000000000 + case 12: + return 1000000000000 + case 13: + return 10000000000000 + case 14: + return 100000000000000 + case 15: + return 1000000000000000 + case 16: + return 10000000000000000 + case 17: + return 100000000000000000 + case 18: + return 1000000000000000000 + default: + return 0 + } +} + +// negativeScaleInt64 returns the result of dividing base by scale * 10 and the remainder, or +// false if no such division is possible. Dividing by negative scales is undefined. +func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) { + if scale == 0 { + return base, 0, true + } + // the max scale representable in base 10 in an int64 is 18 decimal places + if scale >= 18 { + return 0, base, false + } + divisor := pow10Int64(int64(scale)) + return base / divisor, base % divisor, true +} + +// removeInt64Factors divides in a loop; the return values have the property that +// value == result * base ^ scale +func removeInt64Factors(value int64, base int64) (result int64, times int32) { + times = 0 + result = value + negative := result < 0 + if negative { + result = -result + } + switch base { + // allow the compiler to optimize the common cases + case 10: + for result >= 10 && result%10 == 0 { + times++ + result = result / 10 + } + // allow the compiler to optimize the common cases + case 1024: + for result >= 1024 && result%1024 == 0 { + times++ + result = result / 1024 + } + default: + for result >= base && result%base == 0 { + times++ + result = result / base + } + } + if negative { + result = -result + } + return result, times +} + +// removeBigIntFactors divides in a loop; the return values have the property that +// d == result * factor ^ times +// d may be modified in place. +// If d == 0, then the return values will be (0, 0) +func removeBigIntFactors(d, factor *big.Int) (result *big.Int, times int32) { + q := big.NewInt(0) + m := big.NewInt(0) + for d.Cmp(bigZero) != 0 { + q.DivMod(d, factor, m) + if m.Cmp(bigZero) != 0 { + break + } + times++ + d, q = q, d + } + return d, times +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go new file mode 100644 index 000000000..3a9560882 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -0,0 +1,792 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "bytes" + "errors" + "fmt" + "math/big" + "regexp" + "strconv" + "strings" + + flag "github.com/spf13/pflag" + + "github.com/go-openapi/spec" + inf "gopkg.in/inf.v0" + "k8s.io/apimachinery/pkg/openapi" +) + +// Quantity is a fixed-point representation of a number. +// It provides convenient marshaling/unmarshaling in JSON and YAML, +// in addition to String() and Int64() accessors. +// +// The serialization format is: +// +// ::= +// (Note that may be empty, from the "" case in .) +// ::= 0 | 1 | ... | 9 +// ::= | +// ::= | . | . | . +// ::= "+" | "-" +// ::= | +// ::= | | +// ::= Ki | Mi | Gi | Ti | Pi | Ei +// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// ::= m | "" | k | M | G | T | P | E +// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// ::= "e" | "E" +// +// No matter which of the three exponent forms is used, no quantity may represent +// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal +// places. Numbers larger or more precise will be capped or rounded up. +// (E.g.: 0.1m will rounded up to 1m.) +// This may be extended in the future if we require larger or smaller quantities. +// +// When a Quantity is parsed from a string, it will remember the type of suffix +// it had, and will use the same type again when it is serialized. +// +// Before serializing, Quantity will be put in "canonical form". +// This means that Exponent/suffix will be adjusted up or down (with a +// corresponding increase or decrease in Mantissa) such that: +// a. No precision is lost +// b. No fractional digits will be emitted +// c. The exponent (or suffix) is as large as possible. +// The sign will be omitted unless the number is negative. +// +// Examples: +// 1.5 will be serialized as "1500m" +// 1.5Gi will be serialized as "1536Mi" +// +// NOTE: We reserve the right to amend this canonical format, perhaps to +// allow 1.5 to be canonical. +// TODO: Remove above disclaimer after all bikeshedding about format is over, +// or after March 2015. +// +// Note that the quantity will NEVER be internally represented by a +// floating point number. That is the whole point of this exercise. +// +// Non-canonical values will still parse as long as they are well formed, +// but will be re-emitted in their canonical form. (So always use canonical +// form, or don't diff.) +// +// This format is intended to make it difficult to use these numbers without +// writing some sort of special handling code in the hopes that that will +// cause implementors to also use a fixed point implementation. +// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +type Quantity struct { + // i is the quantity in int64 scaled form, if d.Dec == nil + i int64Amount + // d is the quantity in inf.Dec form if d.Dec != nil + d infDecAmount + // s is the generated value of this quantity to avoid recalculation + s string + + // Change Format at will. 
See the comment for Canonicalize for + // more details. + Format +} + +// CanonicalValue allows a quantity amount to be converted to a string. +type CanonicalValue interface { + // AsCanonicalBytes returns a byte array representing the string representation + // of the value mantissa and an int32 representing its exponent in base-10. Callers may + // pass a byte slice to the method to avoid allocations. + AsCanonicalBytes(out []byte) ([]byte, int32) + // AsCanonicalBase1024Bytes returns a byte array representing the string representation + // of the value mantissa and an int32 representing its exponent in base-1024. Callers + // may pass a byte slice to the method to avoid allocations. + AsCanonicalBase1024Bytes(out []byte) ([]byte, int32) +} + +// Format lists the three possible formattings of a quantity. +type Format string + +const ( + DecimalExponent = Format("DecimalExponent") // e.g., 12e6 + BinarySI = Format("BinarySI") // e.g., 12Mi (12 * 2^20) + DecimalSI = Format("DecimalSI") // e.g., 12M (12 * 10^6) +) + +// MustParse turns the given string into a quantity or panics; for tests +// or others cases where you know the string is valid. +func MustParse(str string) Quantity { + q, err := ParseQuantity(str) + if err != nil { + panic(fmt.Errorf("cannot parse '%v': %v", str, err)) + } + return q +} + +const ( + // splitREString is used to separate a number from its suffix; as such, + // this is overly permissive, but that's OK-- it will be checked later. + splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$" +) + +var ( + // splitRE is used to get the various parts of a number. + splitRE = regexp.MustCompile(splitREString) + + // Errors that could happen while parsing a string. + ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'") + ErrNumeric = errors.New("unable to parse numeric part of quantity") + ErrSuffix = errors.New("unable to parse quantity's suffix") +) + +// parseQuantityString is a fast scanner for quantity values. +func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) { + positive = true + pos := 0 + end := len(str) + + // handle leading sign + if pos < end { + switch str[0] { + case '-': + positive = false + pos++ + case '+': + pos++ + } + } + + // strip leading zeros +Zeroes: + for i := pos; ; i++ { + if i >= end { + num = "0" + value = num + return + } + switch str[i] { + case '0': + pos++ + default: + break Zeroes + } + } + + // extract the numerator +Num: + for i := pos; ; i++ { + if i >= end { + num = str[pos:end] + value = str[0:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + num = str[pos:i] + pos = i + break Num + } + } + + // if we stripped all numerator positions, always return 0 + if len(num) == 0 { + num = "0" + } + + // handle a denominator + if pos < end && str[pos] == '.' { + pos++ + Denom: + for i := pos; ; i++ { + if i >= end { + denom = str[pos:end] + value = str[0:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + denom = str[pos:i] + pos = i + break Denom + } + } + // TODO: we currently allow 1.G, but we may not want to in the future. 
+ // if len(denom) == 0 { + // err = ErrFormatWrong + // return + // } + } + value = str[0:pos] + + // grab the elements of the suffix + suffixStart := pos + for i := pos; ; i++ { + if i >= end { + suffix = str[suffixStart:end] + return + } + if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") { + pos = i + break + } + } + if pos < end { + switch str[pos] { + case '-', '+': + pos++ + } + } +Suffix: + for i := pos; ; i++ { + if i >= end { + suffix = str[suffixStart:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + break Suffix + } + } + // we encountered a non decimal in the Suffix loop, but the last character + // was not a valid exponent + err = ErrFormatWrong + return +} + +// ParseQuantity turns str into a Quantity, or returns an error. +func ParseQuantity(str string) (Quantity, error) { + if len(str) == 0 { + return Quantity{}, ErrFormatWrong + } + if str == "0" { + return Quantity{Format: DecimalSI, s: str}, nil + } + + positive, value, num, denom, suf, err := parseQuantityString(str) + if err != nil { + return Quantity{}, err + } + + base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf)) + if !ok { + return Quantity{}, ErrSuffix + } + + precision := int32(0) + scale := int32(0) + mantissa := int64(1) + switch format { + case DecimalExponent, DecimalSI: + scale = exponent + precision = maxInt64Factors - int32(len(num)+len(denom)) + case BinarySI: + scale = 0 + switch { + case exponent >= 0 && len(denom) == 0: + // only handle positive binary numbers with the fast path + mantissa = int64(int64(mantissa) << uint64(exponent)) + // 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision + precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1 + default: + precision = -1 + } + } + + if precision >= 0 { + // if we have a denominator, shift the entire value to the left by the number of places in the + // denominator + scale -= int32(len(denom)) + if scale >= int32(Nano) { + shifted := num + denom + + var value int64 + value, err := strconv.ParseInt(shifted, 10, 64) + if err != nil { + return Quantity{}, ErrNumeric + } + if result, ok := int64Multiply(value, int64(mantissa)); ok { + if !positive { + result = -result + } + // if the number is in canonical form, reuse the string + switch format { + case BinarySI: + if exponent%10 == 0 && (value&0x07 != 0) { + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil + } + default: + if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' { + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil + } + } + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil + } + } + } + + amount := new(inf.Dec) + if _, ok := amount.SetString(value); !ok { + return Quantity{}, ErrNumeric + } + + // So that no one but us has to think about suffixes, remove it. + if base == 10 { + amount.SetScale(amount.Scale() + Scale(exponent).infScale()) + } else if base == 2 { + // numericSuffix = 2 ** exponent + numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent)) + ub := amount.UnscaledBig() + amount.SetUnscaledBig(ub.Mul(ub, numericSuffix)) + } + + // Cap at min/max bounds. 
+ sign := amount.Sign() + if sign == -1 { + amount.Neg(amount) + } + + // This rounds non-zero values up to the minimum representable value, under the theory that + // if you want some resources, you should get some resources, even if you asked for way too small + // of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have + // the side effect of rounding values < .5n to zero. + if v, ok := amount.Unscaled(); v != int64(0) || !ok { + amount.Round(amount, Nano.infScale(), inf.RoundUp) + } + + // The max is just a simple cap. + // TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster + if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 { + amount.Set(maxAllowed.Dec) + } + + if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 { + // This avoids rounding and hopefully confusion, too. + format = DecimalSI + } + if sign == -1 { + amount.Neg(amount) + } + + return Quantity{d: infDecAmount{amount}, Format: format}, nil +} + +// DeepCopy returns a deep-copy of the Quantity value. Note that the method +// receiver is a value, so we can mutate it in-place and return it. +func (q Quantity) DeepCopy() Quantity { + if q.d.Dec != nil { + tmp := &inf.Dec{} + q.d.Dec = tmp.Set(q.d.Dec) + } + return q +} + +// OpenAPIDefinition returns openAPI definition for this type. +func (_ Quantity) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + } +} + +// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). +// +// Note about BinarySI: +// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between +// -1 and +1, it will be emitted as if q.Format were DecimalSI. +// * Otherwise, if q.Format is set to BinarySI, frational parts of q.Amount will be +// rounded up. (1.1i becomes 2i.) +func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { + if q.IsZero() { + return zeroBytes, nil + } + + var rounded CanonicalValue + format := q.Format + switch format { + case DecimalExponent, DecimalSI: + case BinarySI: + if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 { + // This avoids rounding and hopefully confusion, too. + format = DecimalSI + } else { + var exact bool + if rounded, exact = q.AsScale(0); !exact { + // Don't lose precision-- show as DecimalSI + format = DecimalSI + } + } + default: + format = DecimalExponent + } + + // TODO: If BinarySI formatting is requested but would cause rounding, upgrade to + // one of the other formats. + switch format { + case DecimalExponent, DecimalSI: + number, exponent := q.AsCanonicalBytes(out) + suffix, _ := quantitySuffixer.constructBytes(10, exponent, format) + return number, suffix + default: + // format must be BinarySI + number, exponent := rounded.AsCanonicalBase1024Bytes(out) + suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format) + return number, suffix + } +} + +// AsInt64 returns a representation of the current value as an int64 if a fast conversion +// is possible. If false is returned, callers must use the inf.Dec form of this quantity. +func (q *Quantity) AsInt64() (int64, bool) { + if q.d.Dec != nil { + return 0, false + } + return q.i.AsInt64() +} + +// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself. 
+func (q *Quantity) ToDec() *Quantity { + if q.d.Dec == nil { + q.d.Dec = q.i.AsDec() + q.i = int64Amount{} + } + return q +} + +// AsDec returns the quantity as represented by a scaled inf.Dec. +func (q *Quantity) AsDec() *inf.Dec { + if q.d.Dec != nil { + return q.d.Dec + } + q.d.Dec = q.i.AsDec() + q.i = int64Amount{} + return q.d.Dec +} + +// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa +// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra +// allocation. +func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + if q.d.Dec != nil { + return q.d.AsCanonicalBytes(out) + } + return q.i.AsCanonicalBytes(out) +} + +// IsZero returns true if the quantity is equal to zero. +func (q *Quantity) IsZero() bool { + if q.d.Dec != nil { + return q.d.Dec.Sign() == 0 + } + return q.i.value == 0 +} + +// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the +// quantity is greater than zero. +func (q *Quantity) Sign() int { + if q.d.Dec != nil { + return q.d.Dec.Sign() + } + return q.i.Sign() +} + +// AsScaled returns the current value, rounded up to the provided scale, and returns +// false if the scale resulted in a loss of precision. +func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) { + if q.d.Dec != nil { + return q.d.AsScale(scale) + } + return q.i.AsScale(scale) +} + +// RoundUp updates the quantity to the provided scale, ensuring that the value is at +// least 1. False is returned if the rounding operation resulted in a loss of precision. +// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10). +func (q *Quantity) RoundUp(scale Scale) bool { + if q.d.Dec != nil { + q.s = "" + d, exact := q.d.AsScale(scale) + q.d = d + return exact + } + // avoid clearing the string value if we have already calculated it + if q.i.scale >= scale { + return true + } + q.s = "" + i, exact := q.i.AsScale(scale) + q.i = i + return exact +} + +// Add adds the provide y quantity to the current value. If the current value is zero, +// the format of the quantity will be updated to the format of y. +func (q *Quantity) Add(y Quantity) { + q.s = "" + if q.d.Dec == nil && y.d.Dec == nil { + if q.i.value == 0 { + q.Format = y.Format + } + if q.i.Add(y.i) { + return + } + } else if q.IsZero() { + q.Format = y.Format + } + q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec()) +} + +// Sub subtracts the provided quantity from the current value in place. If the current +// value is zero, the format of the quantity will be updated to the format of y. +func (q *Quantity) Sub(y Quantity) { + q.s = "" + if q.IsZero() { + q.Format = y.Format + } + if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) { + return + } + q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) +} + +// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the +// quantity is greater than y. +func (q *Quantity) Cmp(y Quantity) int { + if q.d.Dec == nil && y.d.Dec == nil { + return q.i.Cmp(y.i) + } + return q.AsDec().Cmp(y.AsDec()) +} + +// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the +// quantity is greater than y. +func (q *Quantity) CmpInt64(y int64) int { + if q.d.Dec != nil { + return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0))) + } + return q.i.Cmp(int64Amount{value: y}) +} + +// Neg sets quantity to be the negative value of itself. 
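Add, Sub and Cmp above stay on the fast int64 path when both operands fit, and fall back to inf.Dec otherwise. A small usage sketch (illustrative; same import assumption as the earlier example):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	used := resource.MustParse("1500m")
	used.Add(resource.MustParse("500m"))
	fmt.Println(used.String()) // "2" -- 1.5 + 0.5, re-canonicalized

	limit := resource.MustParse("4")
	fmt.Println(used.Cmp(limit) < 0) // true: 2 is below the limit
}
```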
+func (q *Quantity) Neg() { + q.s = "" + if q.d.Dec == nil { + q.i.value = -q.i.value + return + } + q.d.Dec.Neg(q.d.Dec) +} + +// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation +// of most Quantity values. +const int64QuantityExpectedBytes = 18 + +// String formats the Quantity as a string, caching the result if not calculated. +// String is an expensive operation and caching this result significantly reduces the cost of +// normal parse / marshal operations on Quantity. +func (q *Quantity) String() string { + if len(q.s) == 0 { + result := make([]byte, 0, int64QuantityExpectedBytes) + number, suffix := q.CanonicalizeBytes(result) + number = append(number, suffix...) + q.s = string(number) + } + return q.s +} + +// MarshalJSON implements the json.Marshaller interface. +func (q Quantity) MarshalJSON() ([]byte, error) { + if len(q.s) > 0 { + out := make([]byte, len(q.s)+2) + out[0], out[len(out)-1] = '"', '"' + copy(out[1:], q.s) + return out, nil + } + result := make([]byte, int64QuantityExpectedBytes, int64QuantityExpectedBytes) + result[0] = '"' + number, suffix := q.CanonicalizeBytes(result[1:1]) + // if the same slice was returned to us that we passed in, avoid another allocation by copying number into + // the source slice and returning that + if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes { + number = append(number, suffix...) + number = append(number, '"') + return result[:1+len(number)], nil + } + // if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use + // append + result = result[:1] + result = append(result, number...) + result = append(result, suffix...) + result = append(result, '"') + return result, nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +// TODO: Remove support for leading/trailing whitespace +func (q *Quantity) UnmarshalJSON(value []byte) error { + l := len(value) + if l == 4 && bytes.Equal(value, []byte("null")) { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + if l >= 2 && value[0] == '"' && value[l-1] == '"' { + value = value[1 : l-1] + } + + parsed, err := ParseQuantity(strings.TrimSpace(string(value))) + if err != nil { + return err + } + + // This copy is safe because parsed will not be referred to again. + *q = parsed + return nil +} + +// NewQuantity returns a new Quantity representing the given +// value in the given format. +func NewQuantity(value int64, format Format) *Quantity { + return &Quantity{ + i: int64Amount{value: value}, + Format: format, + } +} + +// NewMilliQuantity returns a new Quantity representing the given +// value * 1/1000 in the given format. Note that BinarySI formatting +// will round fractional values, and will be changed to DecimalSI for +// values x where (-1 < x < 1) && (x != 0). +func NewMilliQuantity(value int64, format Format) *Quantity { + return &Quantity{ + i: int64Amount{value: value, scale: -3}, + Format: format, + } +} + +// NewScaledQuantity returns a new Quantity representing the given +// value * 10^scale in DecimalSI format. +func NewScaledQuantity(value int64, scale Scale) *Quantity { + return &Quantity{ + i: int64Amount{value: value, scale: scale}, + Format: DecimalSI, + } +} + +// Value returns the value of q; any fractional part will be lost. 
+func (q *Quantity) Value() int64 { + return q.ScaledValue(0) +} + +// MilliValue returns the value of ceil(q * 1000); this could overflow an int64; +// if that's a concern, call Value() first to verify the number is small enough. +func (q *Quantity) MilliValue() int64 { + return q.ScaledValue(Milli) +} + +// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64. +// To detect overflow, call Value() first and verify the expected magnitude. +func (q *Quantity) ScaledValue(scale Scale) int64 { + if q.d.Dec == nil { + i, _ := q.i.AsScaledInt64(scale) + return i + } + dec := q.d.Dec + return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale())) +} + +// Set sets q's value to be value. +func (q *Quantity) Set(value int64) { + q.SetScaled(value, 0) +} + +// SetMilli sets q's value to be value * 1/1000. +func (q *Quantity) SetMilli(value int64) { + q.SetScaled(value, Milli) +} + +// SetScaled sets q's value to be value * 10^scale +func (q *Quantity) SetScaled(value int64, scale Scale) { + q.s = "" + q.d.Dec = nil + q.i = int64Amount{value: value, scale: scale} +} + +// Copy is a convenience function that makes a deep copy for you. Non-deep +// copies of quantities share pointers and you will regret that. +func (q *Quantity) Copy() *Quantity { + if q.d.Dec == nil { + return &Quantity{ + s: q.s, + i: q.i, + Format: q.Format, + } + } + tmp := &inf.Dec{} + return &Quantity{ + s: q.s, + d: infDecAmount{tmp.Set(q.d.Dec)}, + Format: q.Format, + } +} + +// qFlag is a helper type for the Flag function +type qFlag struct { + dest *Quantity +} + +// Sets the value of the internal Quantity. (used by flag & pflag) +func (qf qFlag) Set(val string) error { + q, err := ParseQuantity(val) + if err != nil { + return err + } + // This copy is OK because q will not be referenced again. + *qf.dest = q + return nil +} + +// Converts the value of the internal Quantity to a string. (used by flag & pflag) +func (qf qFlag) String() string { + return qf.dest.String() +} + +// States the type of flag this is (Quantity). (used by pflag) +func (qf qFlag) Type() string { + return "quantity" +} + +// QuantityFlag is a helper that makes a quantity flag (using standard flag package). +// Will panic if defaultValue is not a valid quantity. +func QuantityFlag(flagName, defaultValue, description string) *Quantity { + q := MustParse(defaultValue) + flag.Var(NewQuantityFlagValue(&q), flagName, description) + return &q +} + +// NewQuantityFlagValue returns an object that can be used to back a flag, +// pointing at the given Quantity variable. +func NewQuantityFlagValue(q *Quantity) flag.Value { + return qFlag{q} +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go new file mode 100644 index 000000000..74dfb4e4b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -0,0 +1,284 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "fmt" + "io" + + "github.com/gogo/protobuf/proto" +) + +var _ proto.Sizer = &Quantity{} + +func (m *Quantity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +// MarshalTo is a customized version of the generated Protobuf unmarshaler for a struct +// with a single string field. +func (m *Quantity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + + data[i] = 0xa + i++ + // BEGIN CUSTOM MARSHAL + out := m.String() + i = encodeVarintGenerated(data, i, uint64(len(out))) + i += copy(data[i:], out) + // END CUSTOM MARSHAL + + return i, nil +} + +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} + +func (m *Quantity) Size() (n int) { + var l int + _ = l + + // BEGIN CUSTOM SIZE + l = len(m.String()) + // END CUSTOM SIZE + + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct +// with a single string field. +func (m *Quantity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quantity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quantity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + + // BEGIN CUSTOM DECODE + p, err := ParseQuantity(s) + if err != nil { + return err + } + *m = p + // END CUSTOM DECODE + + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go new file mode 100644 index 000000000..55e177b0e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go @@ -0,0 +1,95 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math" + "math/big" + "sync" +) + +var ( + // A sync pool to reduce allocation. + intPool sync.Pool + maxInt64 = big.NewInt(math.MaxInt64) +) + +func init() { + intPool.New = func() interface{} { + return &big.Int{} + } +} + +// scaledValue scales given unscaled value from scale to new Scale and returns +// an int64. It ALWAYS rounds up the result when scale down. The final result might +// overflow. +// +// scale, newScale represents the scale of the unscaled decimal. +// The mathematical value of the decimal is unscaled * 10**(-scale). +func scaledValue(unscaled *big.Int, scale, newScale int) int64 { + dif := scale - newScale + if dif == 0 { + return unscaled.Int64() + } + + // Handle scale up + // This is an easy case, we do not need to care about rounding and overflow. + // If any intermediate operation causes overflow, the result will overflow. + if dif < 0 { + return unscaled.Int64() * int64(math.Pow10(-dif)) + } + + // Handle scale down + // We have to be careful about the intermediate operations. 
+ + // fast path when unscaled < max.Int64 and exp(10,dif) < max.Int64 + const log10MaxInt64 = 19 + if unscaled.Cmp(maxInt64) < 0 && dif < log10MaxInt64 { + divide := int64(math.Pow10(dif)) + result := unscaled.Int64() / divide + mod := unscaled.Int64() % divide + if mod != 0 { + return result + 1 + } + return result + } + + // We should only convert back to int64 when getting the result. + divisor := intPool.Get().(*big.Int) + exp := intPool.Get().(*big.Int) + result := intPool.Get().(*big.Int) + defer func() { + intPool.Put(divisor) + intPool.Put(exp) + intPool.Put(result) + }() + + // divisor = 10^(dif) + // TODO: create loop up table if exp costs too much. + divisor.Exp(bigTen, exp.SetInt64(int64(dif)), nil) + // reuse exp + remainder := exp + + // result = unscaled / divisor + // remainder = unscaled % divisor + result.DivMod(unscaled, divisor, remainder) + if remainder.Sign() != 0 { + return result.Int64() + 1 + } + + return result.Int64() +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go new file mode 100644 index 000000000..5ed7abe66 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go @@ -0,0 +1,198 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "strconv" +) + +type suffix string + +// suffixer can interpret and construct suffixes. +type suffixer interface { + interpret(suffix) (base, exponent int32, fmt Format, ok bool) + construct(base, exponent int32, fmt Format) (s suffix, ok bool) + constructBytes(base, exponent int32, fmt Format) (s []byte, ok bool) +} + +// quantitySuffixer handles suffixes for all three formats that quantity +// can handle. 
+var quantitySuffixer = newSuffixer() + +type bePair struct { + base, exponent int32 +} + +type listSuffixer struct { + suffixToBE map[suffix]bePair + beToSuffix map[bePair]suffix + beToSuffixBytes map[bePair][]byte +} + +func (ls *listSuffixer) addSuffix(s suffix, pair bePair) { + if ls.suffixToBE == nil { + ls.suffixToBE = map[suffix]bePair{} + } + if ls.beToSuffix == nil { + ls.beToSuffix = map[bePair]suffix{} + } + if ls.beToSuffixBytes == nil { + ls.beToSuffixBytes = map[bePair][]byte{} + } + ls.suffixToBE[s] = pair + ls.beToSuffix[pair] = s + ls.beToSuffixBytes[pair] = []byte(s) +} + +func (ls *listSuffixer) lookup(s suffix) (base, exponent int32, ok bool) { + pair, ok := ls.suffixToBE[s] + if !ok { + return 0, 0, false + } + return pair.base, pair.exponent, true +} + +func (ls *listSuffixer) construct(base, exponent int32) (s suffix, ok bool) { + s, ok = ls.beToSuffix[bePair{base, exponent}] + return +} + +func (ls *listSuffixer) constructBytes(base, exponent int32) (s []byte, ok bool) { + s, ok = ls.beToSuffixBytes[bePair{base, exponent}] + return +} + +type suffixHandler struct { + decSuffixes listSuffixer + binSuffixes listSuffixer +} + +type fastLookup struct { + *suffixHandler +} + +func (l fastLookup) interpret(s suffix) (base, exponent int32, format Format, ok bool) { + switch s { + case "": + return 10, 0, DecimalSI, true + case "n": + return 10, -9, DecimalSI, true + case "u": + return 10, -6, DecimalSI, true + case "m": + return 10, -3, DecimalSI, true + case "k": + return 10, 3, DecimalSI, true + case "M": + return 10, 6, DecimalSI, true + case "G": + return 10, 9, DecimalSI, true + } + return l.suffixHandler.interpret(s) +} + +func newSuffixer() suffixer { + sh := &suffixHandler{} + + // IMPORTANT: if you change this section you must change fastLookup + + sh.binSuffixes.addSuffix("Ki", bePair{2, 10}) + sh.binSuffixes.addSuffix("Mi", bePair{2, 20}) + sh.binSuffixes.addSuffix("Gi", bePair{2, 30}) + sh.binSuffixes.addSuffix("Ti", bePair{2, 40}) + sh.binSuffixes.addSuffix("Pi", bePair{2, 50}) + sh.binSuffixes.addSuffix("Ei", bePair{2, 60}) + // Don't emit an error when trying to produce + // a suffix for 2^0. 
+ sh.decSuffixes.addSuffix("", bePair{2, 0}) + + sh.decSuffixes.addSuffix("n", bePair{10, -9}) + sh.decSuffixes.addSuffix("u", bePair{10, -6}) + sh.decSuffixes.addSuffix("m", bePair{10, -3}) + sh.decSuffixes.addSuffix("", bePair{10, 0}) + sh.decSuffixes.addSuffix("k", bePair{10, 3}) + sh.decSuffixes.addSuffix("M", bePair{10, 6}) + sh.decSuffixes.addSuffix("G", bePair{10, 9}) + sh.decSuffixes.addSuffix("T", bePair{10, 12}) + sh.decSuffixes.addSuffix("P", bePair{10, 15}) + sh.decSuffixes.addSuffix("E", bePair{10, 18}) + + return fastLookup{sh} +} + +func (sh *suffixHandler) construct(base, exponent int32, fmt Format) (s suffix, ok bool) { + switch fmt { + case DecimalSI: + return sh.decSuffixes.construct(base, exponent) + case BinarySI: + return sh.binSuffixes.construct(base, exponent) + case DecimalExponent: + if base != 10 { + return "", false + } + if exponent == 0 { + return "", true + } + return suffix("e" + strconv.FormatInt(int64(exponent), 10)), true + } + return "", false +} + +func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s []byte, ok bool) { + switch format { + case DecimalSI: + return sh.decSuffixes.constructBytes(base, exponent) + case BinarySI: + return sh.binSuffixes.constructBytes(base, exponent) + case DecimalExponent: + if base != 10 { + return nil, false + } + if exponent == 0 { + return nil, true + } + result := make([]byte, 8, 8) + result[0] = 'e' + number := strconv.AppendInt(result[1:1], int64(exponent), 10) + if &result[1] == &number[0] { + return result[:1+len(number)], true + } + result = append(result[:1], number...) + return result, true + } + return nil, false +} + +func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int32, fmt Format, ok bool) { + // Try lookup tables first + if b, e, ok := sh.decSuffixes.lookup(suffix); ok { + return b, e, DecimalSI, true + } + if b, e, ok := sh.binSuffixes.lookup(suffix); ok { + return b, e, BinarySI, true + } + + if len(suffix) > 1 && (suffix[0] == 'E' || suffix[0] == 'e') { + parsed, err := strconv.ParseInt(string(suffix[1:]), 10, 64) + if err != nil { + return 0, 0, DecimalExponent, false + } + return 10, int32(parsed), DecimalExponent, true + } + + return 0, 0, DecimalExponent, false +} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go new file mode 100644 index 000000000..2c8568c1f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package announced contains tools for announcing API group factories. This is +// distinct from registration (in the 'registered' package) in that it's safe +// to announce every possible group linked in, but only groups requested at +// runtime should be registered. This package contains both a registry, and +// factory code (which was formerly copy-pasta in every install package). 
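The announce-then-register split described in the package comment above can be exercised end to end. The following is a minimal sketch only: the group name, import prefix and no-op AddToScheme are made up for illustration, and it assumes the vendored announced, registered, runtime and schema packages:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apimachinery/announced"
	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	groups := make(announced.APIGroupFactoryRegistry)

	// Announcing only records the factories; nothing is registered yet.
	if err := groups.AnnounceGroup(&announced.GroupMetaFactoryArgs{
		GroupName:              "example.k8s.io", // hypothetical group
		VersionPreferenceOrder: []string{"v1"},
		ImportPrefix:           "example.io/example/pkg/apis/example", // hypothetical prefix
	}); err != nil {
		panic(err)
	}
	if err := groups.AnnounceGroupVersion(&announced.GroupVersionFactoryArgs{
		GroupName:   "example.k8s.io",
		VersionName: "v1",
		AddToScheme: func(*runtime.Scheme) error { return nil }, // no real types in this sketch
	}); err != nil {
		panic(err)
	}

	// Registration and enabling happen later, driven by the registration manager.
	m := registered.NewOrDie("") // empty KUBE_API_VERSIONS allows every version
	scheme := runtime.NewScheme()
	if err := groups.RegisterAndEnableAll(m, scheme); err != nil {
		panic(err)
	}
	fmt.Println(m.IsEnabledVersion(schema.GroupVersion{Group: "example.k8s.io", Version: "v1"})) // true
}
```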
+package announced + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" +) + +// APIGroupFactoryRegistry allows for groups and versions to announce themselves, +// which simply makes them available and doesn't take other actions. Later, +// users of the registry can select which groups and versions they'd actually +// like to register with an APIRegistrationManager. +// +// (Right now APIRegistrationManager has separate 'registration' and 'enabled' +// concepts-- APIGroupFactory is going to take over the former function; +// they will overlap untill the refactoring is finished.) +// +// The key is the group name. After initialization, this should be treated as +// read-only. It is implemented as a map from group name to group factory, and +// it is safe to use this knowledge to manually pick out groups to register +// (e.g., for testing). +type APIGroupFactoryRegistry map[string]*GroupMetaFactory + +func (gar APIGroupFactoryRegistry) group(groupName string) *GroupMetaFactory { + gmf, ok := gar[groupName] + if !ok { + gmf = &GroupMetaFactory{VersionArgs: map[string]*GroupVersionFactoryArgs{}} + gar[groupName] = gmf + } + return gmf +} + +// AnnounceGroupVersion adds the particular arguments for this group version to the group factory. +func (gar APIGroupFactoryRegistry) AnnounceGroupVersion(gvf *GroupVersionFactoryArgs) error { + gmf := gar.group(gvf.GroupName) + if _, ok := gmf.VersionArgs[gvf.VersionName]; ok { + return fmt.Errorf("version %q in group %q has already been announced", gvf.VersionName, gvf.GroupName) + } + gmf.VersionArgs[gvf.VersionName] = gvf + return nil +} + +// AnnounceGroup adds the group-wide arguments to the group factory. +func (gar APIGroupFactoryRegistry) AnnounceGroup(args *GroupMetaFactoryArgs) error { + gmf := gar.group(args.GroupName) + if gmf.GroupArgs != nil { + return fmt.Errorf("group %q has already been announced", args.GroupName) + } + gmf.GroupArgs = args + return nil +} + +// RegisterAndEnableAll throws every factory at the specified API registration +// manager, and lets it decide which to register. (If you want to do this a la +// cart, you may look through gar itself-- it's just a map.) +func (gar APIGroupFactoryRegistry) RegisterAndEnableAll(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { + for groupName, gmf := range gar { + if err := gmf.Register(m); err != nil { + return fmt.Errorf("error registering %v: %v", groupName, err) + } + if err := gmf.Enable(m, scheme); err != nil { + return fmt.Errorf("error enabling %v: %v", groupName, err) + } + } + return nil +} + +// AnnouncePreconstructedFactory announces a factory which you've manually assembled. +// You may call this instead of calling AnnounceGroup and AnnounceGroupVersion. +func (gar APIGroupFactoryRegistry) AnnouncePreconstructedFactory(gmf *GroupMetaFactory) error { + name := gmf.GroupArgs.GroupName + if _, exists := gar[name]; exists { + return fmt.Errorf("the group %q has already been announced.", name) + } + gar[name] = gmf + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go new file mode 100644 index 000000000..c469eebcd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go @@ -0,0 +1,252 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package announced + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apimachinery" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +type SchemeFunc func(*runtime.Scheme) error +type VersionToSchemeFunc map[string]SchemeFunc + +// GroupVersionFactoryArgs contains all the per-version parts of a GroupMetaFactory. +type GroupVersionFactoryArgs struct { + GroupName string + VersionName string + + AddToScheme SchemeFunc +} + +// GroupMetaFactoryArgs contains the group-level args of a GroupMetaFactory. +type GroupMetaFactoryArgs struct { + // GroupName is the name of the API-Group + // + // example: 'servicecatalog.k8s.io' + GroupName string + VersionPreferenceOrder []string + // ImportPrefix is the base go package of the API-Group + // + // example: 'k8s.io/kubernetes/pkg/apis/autoscaling' + ImportPrefix string + // RootScopedKinds are resources that are not namespaced. + RootScopedKinds sets.String // nil is allowed + IgnoredKinds sets.String // nil is allowed + + // May be nil if there are no internal objects. + AddInternalObjectsToScheme SchemeFunc +} + +// NewGroupMetaFactory builds the args for you. This is for if you're +// constructing a factory all at once and not using the registry. +func NewGroupMetaFactory(groupArgs *GroupMetaFactoryArgs, versions VersionToSchemeFunc) *GroupMetaFactory { + gmf := &GroupMetaFactory{ + GroupArgs: groupArgs, + VersionArgs: map[string]*GroupVersionFactoryArgs{}, + } + for v, f := range versions { + gmf.VersionArgs[v] = &GroupVersionFactoryArgs{ + GroupName: groupArgs.GroupName, + VersionName: v, + AddToScheme: f, + } + } + return gmf +} + +// Announce adds this Group factory to the global factory registry. It should +// only be called if you constructed the GroupMetaFactory yourself via +// NewGroupMetaFactory. +// Note that this will panic on an error, since it's expected that you'll be +// calling this at initialization time and any error is a result of a +// programmer importing the wrong set of packages. If this assumption doesn't +// work for you, just call DefaultGroupFactoryRegistry.AnnouncePreconstructedFactory +// yourself. +func (gmf *GroupMetaFactory) Announce(groupFactoryRegistry APIGroupFactoryRegistry) *GroupMetaFactory { + if err := groupFactoryRegistry.AnnouncePreconstructedFactory(gmf); err != nil { + panic(err) + } + return gmf +} + +// GroupMetaFactory has the logic for actually assembling and registering a group. +// +// There are two ways of obtaining one of these. +// 1. You can announce your group and versions separately, and then let the +// GroupFactoryRegistry assemble this object for you. (This allows group and +// versions to be imported separately, without referencing each other, to +// keep import trees small.) +// 2. 
You can call NewGroupMetaFactory(), which is mostly a drop-in replacement +// for the old, bad way of doing things. You can then call .Announce() to +// announce your constructed factory to any code that would like to do +// things the new, better way. +// +// Note that GroupMetaFactory actually does construct GroupMeta objects, but +// currently it does so in a way that's very entangled with an +// APIRegistrationManager. It's a TODO item to cleanly separate that interface. +type GroupMetaFactory struct { + GroupArgs *GroupMetaFactoryArgs + // map of version name to version factory + VersionArgs map[string]*GroupVersionFactoryArgs + + // assembled by Register() + prioritizedVersionList []schema.GroupVersion +} + +// Register constructs the finalized prioritized version list and sanity checks +// the announced group & versions. Then it calls register. +func (gmf *GroupMetaFactory) Register(m *registered.APIRegistrationManager) error { + if gmf.GroupArgs == nil { + return fmt.Errorf("partially announced groups are not allowed, only got versions: %#v", gmf.VersionArgs) + } + if len(gmf.VersionArgs) == 0 { + return fmt.Errorf("group %v announced but no versions announced", gmf.GroupArgs.GroupName) + } + + pvSet := sets.NewString(gmf.GroupArgs.VersionPreferenceOrder...) + if pvSet.Len() != len(gmf.GroupArgs.VersionPreferenceOrder) { + return fmt.Errorf("preference order for group %v has duplicates: %v", gmf.GroupArgs.GroupName, gmf.GroupArgs.VersionPreferenceOrder) + } + prioritizedVersions := []schema.GroupVersion{} + for _, v := range gmf.GroupArgs.VersionPreferenceOrder { + prioritizedVersions = append( + prioritizedVersions, + schema.GroupVersion{ + Group: gmf.GroupArgs.GroupName, + Version: v, + }, + ) + } + + // Go through versions that weren't explicitly prioritized. + unprioritizedVersions := []schema.GroupVersion{} + for _, v := range gmf.VersionArgs { + if v.GroupName != gmf.GroupArgs.GroupName { + return fmt.Errorf("found %v/%v in group %v?", v.GroupName, v.VersionName, gmf.GroupArgs.GroupName) + } + if pvSet.Has(v.VersionName) { + pvSet.Delete(v.VersionName) + continue + } + unprioritizedVersions = append(unprioritizedVersions, schema.GroupVersion{Group: v.GroupName, Version: v.VersionName}) + } + if len(unprioritizedVersions) > 1 { + glog.Warningf("group %v has multiple unprioritized versions: %#v. They will have an arbitrary preference order!", gmf.GroupArgs.GroupName, unprioritizedVersions) + } + if pvSet.Len() != 0 { + return fmt.Errorf("group %v has versions in the priority list that were never announced: %s", gmf.GroupArgs.GroupName, pvSet) + } + prioritizedVersions = append(prioritizedVersions, unprioritizedVersions...) 
+ m.RegisterVersions(prioritizedVersions) + gmf.prioritizedVersionList = prioritizedVersions + return nil +} + +func (gmf *GroupMetaFactory) newRESTMapper(scheme *runtime.Scheme, externalVersions []schema.GroupVersion, groupMeta *apimachinery.GroupMeta) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString() + if gmf.GroupArgs.RootScopedKinds != nil { + rootScoped = gmf.GroupArgs.RootScopedKinds + } + ignoredKinds := sets.NewString() + if gmf.GroupArgs.IgnoredKinds != nil { + ignoredKinds = gmf.GroupArgs.IgnoredKinds + } + + return meta.NewDefaultRESTMapperFromScheme( + externalVersions, + groupMeta.InterfacesFor, + gmf.GroupArgs.ImportPrefix, + ignoredKinds, + rootScoped, + scheme, + ) +} + +// Enable enables group versions that are allowed, adds methods to the scheme, etc. +func (gmf *GroupMetaFactory) Enable(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { + externalVersions := []schema.GroupVersion{} + for _, v := range gmf.prioritizedVersionList { + if !m.IsAllowedVersion(v) { + continue + } + externalVersions = append(externalVersions, v) + if err := m.EnableVersions(v); err != nil { + return err + } + gmf.VersionArgs[v.Version].AddToScheme(scheme) + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", gmf.GroupArgs.GroupName) + return nil + } + + if gmf.GroupArgs.AddInternalObjectsToScheme != nil { + gmf.GroupArgs.AddInternalObjectsToScheme(scheme) + } + + preferredExternalVersion := externalVersions[0] + accessor := meta.NewAccessor() + + groupMeta := &apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + SelfLinker: runtime.SelfLinker(accessor), + } + for _, v := range externalVersions { + gvf := gmf.VersionArgs[v.Version] + if err := groupMeta.AddVersionInterfaces( + schema.GroupVersion{Group: gvf.GroupName, Version: gvf.VersionName}, + &meta.VersionInterfaces{ + ObjectConvertor: scheme, + MetadataAccessor: accessor, + }, + ); err != nil { + return err + } + } + groupMeta.InterfacesFor = groupMeta.DefaultInterfacesFor + groupMeta.RESTMapper = gmf.newRESTMapper(scheme, externalVersions, groupMeta) + + if err := m.RegisterGroup(*groupMeta); err != nil { + return err + } + return nil +} + +// RegisterAndEnable is provided only to allow this code to get added in multiple steps. +// It's really bad that this is called in init() methods, but supporting this +// temporarily lets us do the change incrementally. +func (gmf *GroupMetaFactory) RegisterAndEnable(registry *registered.APIRegistrationManager, scheme *runtime.Scheme) error { + if err := gmf.Register(registry); err != nil { + return err + } + if err := gmf.Enable(registry, scheme); err != nil { + return err + } + + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go new file mode 100644 index 000000000..b238454b2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package apimachinery contains the generic API machinery code that +// is common to both server and clients. +// This package should never import specific API objects. +package apimachinery // import "k8s.io/apimachinery/pkg/apimachinery" diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go new file mode 100644 index 000000000..f2e32c88c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go @@ -0,0 +1,376 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. +package registered + +import ( + "fmt" + "sort" + "strings" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apimachinery" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// APIRegistrationManager provides the concept of what API groups are enabled. +// +// TODO: currently, it also provides a "registered" concept. But it's wrong to +// have both concepts in the same object. Therefore the "announced" package is +// going to take over the registered concept. After all the install packages +// are switched to using the announce package instead of this package, then we +// can combine the registered/enabled concepts in this object. Simplifying this +// isn't easy right now because there are so many callers of this package. +type APIRegistrationManager struct { + // registeredGroupVersions stores all API group versions for which RegisterGroup is called. + registeredVersions map[schema.GroupVersion]struct{} + + // thirdPartyGroupVersions are API versions which are dynamically + // registered (and unregistered) via API calls to the apiserver + thirdPartyGroupVersions []schema.GroupVersion + + // enabledVersions represents all enabled API versions. It should be a + // subset of registeredVersions. Please call EnableVersions() to add + // enabled versions. + enabledVersions map[schema.GroupVersion]struct{} + + // map of group meta for all groups. + groupMetaMap map[string]*apimachinery.GroupMeta + + // envRequestedVersions represents the versions requested via the + // KUBE_API_VERSIONS environment variable. The install package of each group + // checks this list before add their versions to the latest package and + // Scheme. This list is small and order matters, so represent as a slice + envRequestedVersions []schema.GroupVersion +} + +// NewAPIRegistrationManager constructs a new manager. 
The argument ought to be +// the value of the KUBE_API_VERSIONS env var, or a value of this which you +// wish to test. +func NewAPIRegistrationManager(kubeAPIVersions string) (*APIRegistrationManager, error) { + m := &APIRegistrationManager{ + registeredVersions: map[schema.GroupVersion]struct{}{}, + thirdPartyGroupVersions: []schema.GroupVersion{}, + enabledVersions: map[schema.GroupVersion]struct{}{}, + groupMetaMap: map[string]*apimachinery.GroupMeta{}, + envRequestedVersions: []schema.GroupVersion{}, + } + + if len(kubeAPIVersions) != 0 { + for _, version := range strings.Split(kubeAPIVersions, ",") { + gv, err := schema.ParseGroupVersion(version) + if err != nil { + return nil, fmt.Errorf("invalid api version: %s in KUBE_API_VERSIONS: %s.", + version, kubeAPIVersions) + } + m.envRequestedVersions = append(m.envRequestedVersions, gv) + } + } + return m, nil +} + +func NewOrDie(kubeAPIVersions string) *APIRegistrationManager { + m, err := NewAPIRegistrationManager(kubeAPIVersions) + if err != nil { + glog.Fatalf("Could not construct version manager: %v (KUBE_API_VERSIONS=%q)", err, kubeAPIVersions) + } + return m +} + +// RegisterVersions adds the given group versions to the list of registered group versions. +func (m *APIRegistrationManager) RegisterVersions(availableVersions []schema.GroupVersion) { + for _, v := range availableVersions { + m.registeredVersions[v] = struct{}{} + } +} + +// RegisterGroup adds the given group to the list of registered groups. +func (m *APIRegistrationManager) RegisterGroup(groupMeta apimachinery.GroupMeta) error { + groupName := groupMeta.GroupVersion.Group + if _, found := m.groupMetaMap[groupName]; found { + return fmt.Errorf("group %q is already registered in groupsMap: %v", groupName, m.groupMetaMap) + } + m.groupMetaMap[groupName] = &groupMeta + return nil +} + +// EnableVersions adds the versions for the given group to the list of enabled versions. +// Note that the caller should call RegisterGroup before calling this method. +// The caller of this function is responsible to add the versions to scheme and RESTMapper. +func (m *APIRegistrationManager) EnableVersions(versions ...schema.GroupVersion) error { + var unregisteredVersions []schema.GroupVersion + for _, v := range versions { + if _, found := m.registeredVersions[v]; !found { + unregisteredVersions = append(unregisteredVersions, v) + } + m.enabledVersions[v] = struct{}{} + } + if len(unregisteredVersions) != 0 { + return fmt.Errorf("Please register versions before enabling them: %v", unregisteredVersions) + } + return nil +} + +// IsAllowedVersion returns if the version is allowed by the KUBE_API_VERSIONS +// environment variable. If the environment variable is empty, then it always +// returns true. +func (m *APIRegistrationManager) IsAllowedVersion(v schema.GroupVersion) bool { + if len(m.envRequestedVersions) == 0 { + return true + } + for _, envGV := range m.envRequestedVersions { + if v == envGV { + return true + } + } + return false +} + +// IsEnabledVersion returns if a version is enabled. +func (m *APIRegistrationManager) IsEnabledVersion(v schema.GroupVersion) bool { + _, found := m.enabledVersions[v] + return found +} + +// EnabledVersions returns all enabled versions. 
Groups are randomly ordered, but versions within groups +// are priority order from best to worst +func (m *APIRegistrationManager) EnabledVersions() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for _, groupMeta := range m.groupMetaMap { + for _, version := range groupMeta.GroupVersions { + if m.IsEnabledVersion(version) { + ret = append(ret, version) + } + } + } + return ret +} + +// EnabledVersionsForGroup returns all enabled versions for a group in order of best to worst +func (m *APIRegistrationManager) EnabledVersionsForGroup(group string) []schema.GroupVersion { + groupMeta, ok := m.groupMetaMap[group] + if !ok { + return []schema.GroupVersion{} + } + + ret := []schema.GroupVersion{} + for _, version := range groupMeta.GroupVersions { + if m.IsEnabledVersion(version) { + ret = append(ret, version) + } + } + return ret +} + +// Group returns the metadata of a group if the group is registered, otherwise +// an error is returned. +func (m *APIRegistrationManager) Group(group string) (*apimachinery.GroupMeta, error) { + groupMeta, found := m.groupMetaMap[group] + if !found { + return nil, fmt.Errorf("group %v has not been registered", group) + } + groupMetaCopy := *groupMeta + return &groupMetaCopy, nil +} + +// IsRegistered takes a string and determines if it's one of the registered groups +func (m *APIRegistrationManager) IsRegistered(group string) bool { + _, found := m.groupMetaMap[group] + return found +} + +// IsRegisteredVersion returns if a version is registered. +func (m *APIRegistrationManager) IsRegisteredVersion(v schema.GroupVersion) bool { + _, found := m.registeredVersions[v] + return found +} + +// RegisteredGroupVersions returns all registered group versions. +func (m *APIRegistrationManager) RegisteredGroupVersions() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for groupVersion := range m.registeredVersions { + ret = append(ret, groupVersion) + } + return ret +} + +// IsThirdPartyAPIGroupVersion returns true if the api version is a user-registered group/version. +func (m *APIRegistrationManager) IsThirdPartyAPIGroupVersion(gv schema.GroupVersion) bool { + for ix := range m.thirdPartyGroupVersions { + if m.thirdPartyGroupVersions[ix] == gv { + return true + } + } + return false +} + +// AddThirdPartyAPIGroupVersions sets the list of third party versions, +// registers them in the API machinery and enables them. +// Skips GroupVersions that are already registered. +// Returns the list of GroupVersions that were skipped. +func (m *APIRegistrationManager) AddThirdPartyAPIGroupVersions(gvs ...schema.GroupVersion) []schema.GroupVersion { + filteredGVs := []schema.GroupVersion{} + skippedGVs := []schema.GroupVersion{} + for ix := range gvs { + if !m.IsRegisteredVersion(gvs[ix]) { + filteredGVs = append(filteredGVs, gvs[ix]) + } else { + glog.V(3).Infof("Skipping %s, because its already registered", gvs[ix].String()) + skippedGVs = append(skippedGVs, gvs[ix]) + } + } + if len(filteredGVs) == 0 { + return skippedGVs + } + m.RegisterVersions(filteredGVs) + m.EnableVersions(filteredGVs...) + m.thirdPartyGroupVersions = append(m.thirdPartyGroupVersions, filteredGVs...) 
+ + return skippedGVs +} + +// InterfacesFor is a union meta.VersionInterfacesFunc func for all registered types +func (m *APIRegistrationManager) InterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { + groupMeta, err := m.Group(version.Group) + if err != nil { + return nil, err + } + return groupMeta.InterfacesFor(version) +} + +// TODO: This is an expedient function, because we don't check if a Group is +// supported throughout the code base. We will abandon this function and +// checking the error returned by the Group() function. +func (m *APIRegistrationManager) GroupOrDie(group string) *apimachinery.GroupMeta { + groupMeta, found := m.groupMetaMap[group] + if !found { + if group == "" { + panic("The legacy v1 API is not registered.") + } else { + panic(fmt.Sprintf("Group %s is not registered.", group)) + } + } + groupMetaCopy := *groupMeta + return &groupMetaCopy +} + +// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: +// 1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR +// 1. legacy kube group preferred version, extensions preferred version, metrics perferred version, legacy +// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, +// all other groups alphabetical. +func (m *APIRegistrationManager) RESTMapper(versionPatterns ...schema.GroupVersion) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + unionedGroups := sets.NewString() + for enabledVersion := range m.enabledVersions { + if !unionedGroups.Has(enabledVersion.Group) { + unionedGroups.Insert(enabledVersion.Group) + groupMeta := m.groupMetaMap[enabledVersion.Group] + unionMapper = append(unionMapper, groupMeta.RESTMapper) + } + } + + if len(versionPatterns) != 0 { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + for _, versionPriority := range versionPatterns { + resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) + } + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} + } + + if len(m.envRequestedVersions) != 0 { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + + for _, versionPriority := range m.envRequestedVersions { + resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) + } + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} + } + + prioritizedGroups := []string{"", "extensions", "metrics"} + resourcePriority, kindPriority := m.prioritiesForGroups(prioritizedGroups...) + + prioritizedGroupsSet := sets.NewString(prioritizedGroups...) + remainingGroups := sets.String{} + for enabledVersion := range m.enabledVersions { + if !prioritizedGroupsSet.Has(enabledVersion.Group) { + remainingGroups.Insert(enabledVersion.Group) + } + } + + remainingResourcePriority, remainingKindPriority := m.prioritiesForGroups(remainingGroups.List()...) + resourcePriority = append(resourcePriority, remainingResourcePriority...) + kindPriority = append(kindPriority, remainingKindPriority...) 
+ + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} +} + +// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, +// then any non-preferred version of the group second. +func (m *APIRegistrationManager) prioritiesForGroups(groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + + for _, group := range groups { + availableVersions := m.EnabledVersionsForGroup(group) + if len(availableVersions) > 0 { + resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) + } + } + for _, group := range groups { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) + kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) + } + + return resourcePriority, kindPriority +} + +// AllPreferredGroupVersions returns the preferred versions of all registered +// groups in the form of "group1/version1,group2/version2,..." +func (m *APIRegistrationManager) AllPreferredGroupVersions() string { + if len(m.groupMetaMap) == 0 { + return "" + } + var defaults []string + for _, groupMeta := range m.groupMetaMap { + defaults = append(defaults, groupMeta.GroupVersion.String()) + } + sort.Strings(defaults) + return strings.Join(defaults, ",") +} + +// ValidateEnvRequestedVersions returns a list of versions that are requested in +// the KUBE_API_VERSIONS environment variable, but not enabled. +func (m *APIRegistrationManager) ValidateEnvRequestedVersions() []schema.GroupVersion { + var missingVersions []schema.GroupVersion + for _, v := range m.envRequestedVersions { + if _, found := m.enabledVersions[v]; !found { + missingVersions = append(missingVersions, v) + } + } + return missingVersions +} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go new file mode 100644 index 000000000..213e34bc0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go @@ -0,0 +1,87 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apimachinery + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupMeta stores the metadata of a group. +type GroupMeta struct { + // GroupVersion represents the preferred version of the group. + GroupVersion schema.GroupVersion + + // GroupVersions is Group + all versions in that group. + GroupVersions []schema.GroupVersion + + // SelfLinker can set or get the SelfLink field of all API types. 
+ // TODO: when versioning changes, make this part of each API definition. + // TODO(lavalamp): Combine SelfLinker & ResourceVersioner interfaces, force all uses + // to go through the InterfacesFor method below. + SelfLinker runtime.SelfLinker + + // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known + // versions. + RESTMapper meta.RESTMapper + + // InterfacesFor returns the default Codec and ResourceVersioner for a given version + // string, or an error if the version is not known. + // TODO: make this stop being a func pointer and always use the default + // function provided below once every place that populates this field has been changed. + InterfacesFor func(version schema.GroupVersion) (*meta.VersionInterfaces, error) + + // InterfacesByVersion stores the per-version interfaces. + InterfacesByVersion map[schema.GroupVersion]*meta.VersionInterfaces +} + +// DefaultInterfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. +// TODO: Remove the "Default" prefix. +func (gm *GroupMeta) DefaultInterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { + if v, ok := gm.InterfacesByVersion[version]; ok { + return v, nil + } + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, gm.GroupVersions) +} + +// AddVersionInterfaces adds the given version to the group. Only call during +// init, after that GroupMeta objects should be immutable. Not thread safe. +// (If you use this, be sure to set .InterfacesFor = .DefaultInterfacesFor) +// TODO: remove the "Interfaces" suffix and make this also maintain the +// .GroupVersions member. +func (gm *GroupMeta) AddVersionInterfaces(version schema.GroupVersion, interfaces *meta.VersionInterfaces) error { + if e, a := gm.GroupVersion.Group, version.Group; a != e { + return fmt.Errorf("got a version in group %v, but am in group %v", a, e) + } + if gm.InterfacesByVersion == nil { + gm.InterfacesByVersion = make(map[schema.GroupVersion]*meta.VersionInterfaces) + } + gm.InterfacesByVersion[version] = interfaces + + // TODO: refactor to make the below error not possible, this function + // should *set* GroupVersions rather than depend on it. + for _, v := range gm.GroupVersions { + if v == version { + return nil + } + } + return fmt.Errorf("added a version interface without the corresponding version %v being in the list %#v", version, gm.GroupVersions) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go new file mode 100644 index 000000000..b9a8fd02d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -0,0 +1,264 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func AddConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + Convert_v1_TypeMeta_To_v1_TypeMeta, + + Convert_unversioned_ListMeta_To_unversioned_ListMeta, + + Convert_intstr_IntOrString_To_intstr_IntOrString, + + Convert_unversioned_Time_To_unversioned_Time, + + Convert_Slice_string_To_unversioned_Time, + + Convert_resource_Quantity_To_resource_Quantity, + + Convert_string_To_labels_Selector, + Convert_labels_Selector_To_string, + + Convert_string_To_fields_Selector, + Convert_fields_Selector_To_string, + + Convert_Pointer_bool_To_bool, + Convert_bool_To_Pointer_bool, + + Convert_Pointer_string_To_string, + Convert_string_To_Pointer_string, + + Convert_Pointer_int64_To_int, + Convert_int_To_Pointer_int64, + + Convert_Pointer_int32_To_int32, + Convert_int32_To_Pointer_int32, + + Convert_Pointer_float64_To_float64, + Convert_float64_To_Pointer_float64, + + Convert_map_to_unversioned_LabelSelector, + Convert_unversioned_LabelSelector_to_map, + + Convert_Slice_string_To_Slice_int32, + ) +} + +func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = float64(**in) + return nil +} + +func Convert_float64_To_Pointer_float64(in *float64, out **float64, s conversion.Scope) error { + temp := float64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int32_To_int32(in **int32, out *int32, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int32(**in) + return nil +} + +func Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) error { + temp := int32(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int(**in) + return nil +} + +func Convert_int_To_Pointer_int64(in *int, out **int64, s conversion.Scope) error { + temp := int64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error { + if *in == nil { + *out = "" + return nil + } + *out = **in + return nil +} + +func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error { + if in == nil { + stringVar := "" + *out = &stringVar + return nil + } + *out = in + return nil +} + +func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error { + if *in == nil { + *out = false + return nil + } + *out = **in + return nil +} + +func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error { + if in == nil { + boolVar := false + *out = &boolVar + return nil + } + *out = in + return nil +} + +// +k8s:conversion-fn=drop +func Convert_v1_TypeMeta_To_v1_TypeMeta(in, out *TypeMeta, s conversion.Scope) error { + // These values are explicitly not copied + //out.APIVersion = in.APIVersion + //out.Kind = in.Kind + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_unversioned_ListMeta_To_unversioned_ListMeta(in, out *ListMeta, s conversion.Scope) error { + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s 
conversion.Scope) error { + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_unversioned_Time_To_unversioned_Time(in *Time, out *Time, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. + *out = *in + return nil +} + +// Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value +func Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error { + str := "" + if len(*input) > 0 { + str = (*input)[0] + } + return out.UnmarshalQueryParameter(str) +} + +func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { + selector, err := labels.Parse(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error { + selector, err := fields.ParseSelector(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error { + *out = *in + return nil +} + +func Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error { + if in == nil { + return nil + } + out = new(LabelSelector) + for labelKey, labelValue := range *in { + AddLabelToSelector(out, labelKey, labelValue) + } + return nil +} + +func Convert_unversioned_LabelSelector_to_map(in *LabelSelector, out *map[string]string, s conversion.Scope) error { + var err error + *out, err = LabelSelectorAsMap(in) + return err +} + +// Convert_Slice_string_To_Slice_int32 converts multiple query parameters or +// a single query parameter with a comma delimited value to multiple int32. +// This is used for port forwarding which needs the ports as int32. +func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversion.Scope) error { + for _, s := range *in { + for _, v := range strings.Split(s, ",") { + x, err := strconv.ParseUint(v, 10, 16) + if err != nil { + return fmt.Errorf("cannot convert to []int32: %v", err) + } + *out = append(*out, int32(x)) + } + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go new file mode 100644 index 000000000..52273240f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta.k8s.io +package v1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go new file mode 100644 index 000000000..fea458dfb --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -0,0 +1,47 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" +) + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +type Duration struct { + time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"` +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (d *Duration) UnmarshalJSON(b []byte) error { + var str string + json.Unmarshal(b, &str) + + pd, err := time.ParseDuration(str) + if err != nil { + return err + } + d.Duration = pd + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.Duration.String()) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go new file mode 100644 index 000000000..9e2b289da --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -0,0 +1,7516 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto + + It has these top-level messages: + APIGroup + APIGroupList + APIResource + APIResourceList + APIVersions + DeleteOptions + Duration + ExportOptions + GetOptions + GroupKind + GroupResource + GroupVersion + GroupVersionForDiscovery + GroupVersionKind + GroupVersionResource + Initializer + Initializers + LabelSelector + LabelSelectorRequirement + ListMeta + ListOptions + MicroTime + ObjectMeta + OwnerReference + Preconditions + RootPaths + ServerAddressByClientCIDR + Status + StatusCause + StatusDetails + Time + Timestamp + TypeMeta + Verbs + WatchEvent +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import time "time" +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *APIVersions) Reset() { *m = APIVersions{} } +func (*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *GetOptions) Reset() { *m = GetOptions{} } +func (*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *GroupKind) Reset() { *m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } +func (*GroupVersionForDiscovery) ProtoMessage() {} +func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{12} +} + +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *Initializer) Reset() { *m = Initializer{} } +func (*Initializer) ProtoMessage() {} +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *Initializers) Reset() { *m = Initializers{} } +func (*Initializers) ProtoMessage() {} +func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} +func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (*LabelSelectorRequirement) ProtoMessage() {} +func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{18} +} + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *MicroTime) Reset() { *m = MicroTime{} } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } +func (*ServerAddressByClientCIDR) ProtoMessage() {} +func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{26} +} + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *StatusCause) Reset() { 
*m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } + +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } + +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } + +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } + +func init() { + proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") + proto.RegisterType((*APIGroupList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroupList") + proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource") + proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList") + proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") + proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") + proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") + proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") + proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") + proto.RegisterType((*GroupVersion)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersion") + proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") + proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") + proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") + proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") + proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") + proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") + proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") + proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") + proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterType((*OwnerReference)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") + proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") + proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") + proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") + proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") + proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") + proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") + proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") + proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") + proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") +} +func (m *APIGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) + n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIGroupList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for _, msg := range m.Groups { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + if m.Namespaced { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + if m.Verbs != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Verbs.Size())) + n2, err := m.Verbs.MarshalTo(dAtA[i:]) + if err != nil { + return 
0, err + } + i += n2 + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) + i += copy(dAtA[i:], m.SingularName) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *APIResourceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i += copy(dAtA[i:], m.GroupVersion) + if len(m.APIResources) > 0 { + for _, msg := range m.APIResources { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIVersions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GracePeriodSeconds != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) + n3, err := m.Preconditions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.OrphanDependents != nil { + dAtA[i] = 0x18 + i++ + if *m.OrphanDependents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.PropagationPolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i += copy(dAtA[i:], *m.PropagationPolicy) + } + return i, nil +} + +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.Duration)) + return i, nil +} + +func (m *ExportOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Export { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x10 + i++ + if m.Exact { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *GetOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x10 + i++ + if m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *GroupKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil +} + +func (m *GroupResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + return i, nil +} + +func (m *GroupVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + return i, nil +} + +func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i += copy(dAtA[i:], m.GroupVersion) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + return i, nil +} + +func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil +} + +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + return i, nil +} + +func (m *Initializer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m *Initializers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Pending) > 0 { + for _, msg := range m.Pending { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Result != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) + n4, err := m.Result.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *LabelSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchLabels) > 0 { + keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) + for k := range m.MatchLabels { + keysForMatchLabels = append(keysForMatchLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + for _, k := range keysForMatchLabels { + dAtA[i] = 0xa + i++ + v := m.MatchLabels[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i += copy(dAtA[i:], m.SelfLink) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + return i, nil +} + +func (m *ListOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i += copy(dAtA[i:], m.LabelSelector) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i += copy(dAtA[i:], m.FieldSelector) + dAtA[i] = 0x18 + i++ + if m.Watch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + } + dAtA[i] = 0x30 + i++ + if m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i += copy(dAtA[i:], m.GenerateName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i += copy(dAtA[i:], m.SelfLink) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], 
m.ResourceVersion) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) + n5, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.DeletionTimestamp != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) + n6, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.DeletionGracePeriodSeconds != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for _, k := range keysForLabels { + dAtA[i] = 0x5a + i++ + v := m.Labels[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for _, k := range keysForAnnotations { + dAtA[i] = 0x62 + i++ + v := m.Annotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.OwnerReferences) > 0 { + for _, msg := range m.OwnerReferences { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + dAtA[i] = 0x72 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i += copy(dAtA[i:], m.ClusterName) + if m.Initializers != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) + n7, err := m.Initializers.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *OwnerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += 
copy(dAtA[i:], m.UID) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + if m.Controller != nil { + dAtA[i] = 0x30 + i++ + if *m.Controller { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.BlockOwnerDeletion != nil { + dAtA[i] = 0x38 + i++ + if *m.BlockOwnerDeletion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Preconditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UID != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i += copy(dAtA[i:], *m.UID) + } + return i, nil +} + +func (m *RootPaths) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) + i += copy(dAtA[i:], m.ClientCIDR) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) + i += copy(dAtA[i:], m.ServerAddress) + return i, nil +} + +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + if m.Details != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) + n9, err := m.Details.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) + return i, nil +} + +func (m *StatusCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) + i += copy(dAtA[i:], m.Field) + return i, nil +} + +func (m *StatusDetails) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + if len(m.Causes) > 0 { + for _, msg := range m.Causes { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + return i, nil +} + +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) + return i, nil +} + +func (m *TypeMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + return i, nil +} + +func (m Verbs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m Verbs) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *WatchEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n10, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + return 
i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *APIGroup) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.PreferredVersion.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIGroupList) Size() (n int) { + var l int + _ = l + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.Verbs != nil { + l = m.Verbs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SingularName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResourceList) Size() (n int) { + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIResources) > 0 { + for _, e := range m.APIResources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIVersions) Size() (n int) { + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeleteOptions) Size() (n int) { + var l int + _ = l + if m.GracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + l = m.Preconditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OrphanDependents != nil { + n += 2 + } + if m.PropagationPolicy != nil { + l = len(*m.PropagationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Duration) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Duration)) + return n +} + +func (m *ExportOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *GetOptions) Size() (n int) { + var l int + _ = l + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *GroupKind) Size() (n int) { + var l int + _ = l + l = 
len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupResource) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersion) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionForDiscovery) Size() (n int) { + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionKind) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionResource) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Initializer) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Initializers) Size() (n int) { + var l int + _ = l + if len(m.Pending) > 0 { + for _, e := range m.Pending { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LabelSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListMeta) Size() (n int) { + var l int + _ = l + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ListOptions) Size() (n int) { + var l int + _ = l + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + n += 2 + return n +} + +func (m *ObjectMeta) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GenerateName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + l = m.CreationTimestamp.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + if m.DeletionTimestamp != nil { + l = m.DeletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DeletionGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.OwnerReferences) > 0 { + for _, e := range m.OwnerReferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ClusterName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Initializers != nil { + l = m.Initializers.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *OwnerReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.Controller != nil { + n += 2 + } + if m.BlockOwnerDeletion != nil { + n += 2 + } + return n +} + +func (m *Preconditions) Size() (n int) { + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RootPaths) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServerAddressByClientCIDR) Size() (n int) { + var l int + _ = l + l = len(m.ClientCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServerAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Status) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + if m.Details != nil { + l = m.Details.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Code)) + return n +} + +func (m *StatusCause) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Field) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatusDetails) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Causes) > 0 { + for _, e := range m.Causes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.RetryAfterSeconds)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Timestamp) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + n += 1 + sovGenerated(uint64(m.Nanos)) + return n +} + +func (m *TypeMeta) Size() (n int) { + var l int + _ = l + l = 
len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m Verbs) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WatchEvent) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIGroup) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIGroup{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *APIGroupList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIGroupList{`, + `Groups:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *APIResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIResource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespaced:` + fmt.Sprintf("%v", this.Namespaced) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Verbs:` + strings.Replace(fmt.Sprintf("%v", this.Verbs), "Verbs", "Verbs", 1) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `SingularName:` + fmt.Sprintf("%v", this.SingularName) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `}`, + }, "") + return s +} +func (this *APIResourceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIResourceList{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteOptions{`, + `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, + `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, + `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *Duration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Duration{`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `}`, + }, "") + return s +} +func (this *ExportOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExportOptions{`, + `Export:` + 
fmt.Sprintf("%v", this.Export) + `,`, + `Exact:` + fmt.Sprintf("%v", this.Exact) + `,`, + `}`, + }, "") + return s +} +func (this *GetOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetOptions{`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, + `}`, + }, "") + return s +} +func (this *GroupVersionForDiscovery) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupVersionForDiscovery{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *Initializer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Initializer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Initializers) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Initializers{`, + `Pending:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Pending), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelector) String() string { + if this == nil { + return "nil" + } + keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) + for k := range this.MatchLabels { + keysForMatchLabels = append(keysForMatchLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + mapStringForMatchLabels := "map[string]string{" + for _, k := range keysForMatchLabels { + mapStringForMatchLabels += fmt.Sprintf("%v: %v,", k, this.MatchLabels[k]) + } + mapStringForMatchLabels += "}" + s := strings.Join([]string{`&LabelSelector{`, + `MatchLabels:` + mapStringForMatchLabels + `,`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *ListMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListMeta{`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `}`, + }, "") + return s +} +func (this *ListOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListOptions{`, + `LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`, + `FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`, + `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMeta) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ObjectMeta{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "Time", 1), `&`, ``, 1) + `,`, + `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, + `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, + `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`, + `}`, + }, "") + return s +} +func (this *OwnerReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OwnerReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Controller:` + valueToStringGenerated(this.Controller) + `,`, + `BlockOwnerDeletion:` + valueToStringGenerated(this.BlockOwnerDeletion) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *RootPaths) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootPaths{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `}`, + }, "") + return s +} +func (this *ServerAddressByClientCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerAddressByClientCIDR{`, + `ClientCIDR:` + fmt.Sprintf("%v", this.ClientCIDR) + `,`, + `ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`, + `}`, + }, "") + return s +} +func (this *Status) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Status{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` 
+ fmt.Sprintf("%v", this.Reason) + `,`, + `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, + `Code:` + fmt.Sprintf("%v", this.Code) + `,`, + `}`, + }, "") + return s +} +func (this *StatusCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusCause{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Field:` + fmt.Sprintf("%v", this.Field) + `,`, + `}`, + }, "") + return s +} +func (this *StatusDetails) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusDetails{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, + `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *Timestamp) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Timestamp{`, + `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, + `Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`, + `}`, + }, "") + return s +} +func (this *TypeMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypeMeta{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *WatchEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, GroupVersionForDiscovery{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PreferredVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIGroupList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, APIGroup{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaced", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Namespaced = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Verbs == nil { + m.Verbs = Verbs{} + } + if err := m.Verbs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SingularName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SingularName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResourceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResourceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResourceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIResources = append(m.APIResources, APIResource{}) + if err := m.APIResources[len(m.APIResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIVersions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*DeleteOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GracePeriodSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Preconditions == nil { + m.Preconditions = &Preconditions{} + } + if err := m.Preconditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OrphanDependents = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := DeletionPropagation(dAtA[iNdEx:postIndex]) + m.PropagationPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= (time.Duration(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Export = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Exact = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeUninitialized = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx 
> l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) 
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Initializer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Initializer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy 
< 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Initializers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Initializers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pending = append(m.Pending, Initializer{}) + if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.MatchLabels[mapkey] = mapvalue + } else { + var mapvalue string + m.MatchLabels[mapkey] = mapvalue + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Watch = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeUninitialized = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeletionTimestamp == nil { + m.DeletionTimestamp = &Time{} + } + if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DeletionGracePeriodSeconds = &v + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Annotations[mapkey] = mapvalue + } else { + var mapvalue string + m.Annotations[mapkey] = mapvalue + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Initializers == nil { + m.Initializers = &Initializers{} + } + if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Controller = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BlockOwnerDeletion = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < 
l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RootPaths) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RootPaths: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientCIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = StatusReason(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Details == nil { + m.Details = &StatusDetails{} + } + if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusCause: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CauseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Field = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Causes = append(m.Causes, StatusCause{}) + if err := m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType) + } + m.RetryAfterSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for 
iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Verbs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Verbs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Verbs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2351 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x52, 0x22, 0x45, 0x3e, 0x8a, 0xfa, 0x98, 0xc8, 0x2d, 0x23, 0xb4, 0xa4, 0xb2, 0x29, + 0x02, 0xa5, 0x75, 0xc8, 0x4a, 0x69, 0x03, 0xd7, 0x6d, 0xdd, 0x6a, 0x45, 0xd9, 0x10, 0x62, 0xd9, + 0xc4, 0x28, 0x76, 0x51, 0xd7, 0x28, 0xba, 0x5a, 0x8e, 0xa8, 0xad, 0x96, 0xbb, 0x9b, 0x99, 0xa1, + 0x6c, 0x35, 0x87, 0xe6, 0xd0, 0x02, 0x3d, 0x14, 0x85, 0x8f, 0x3d, 0x15, 0x31, 0xda, 0xbf, 0xa0, + 0x7f, 0x40, 0x4f, 0x05, 0xea, 0x63, 0x80, 0x5e, 0x72, 0x28, 0x88, 0x98, 0x39, 0xf4, 0x14, 0xf4, + 0x2e, 0xa0, 0x40, 0x31, 0xb3, 0xb3, 0x5f, 0xa4, 0x18, 0x2d, 0xe3, 0xa0, 0xc8, 0x49, 
0xdc, 0xf7, + 0xf1, 0x7b, 0x6f, 0x66, 0xde, 0xbc, 0xf7, 0xe6, 0x09, 0xf6, 0x4f, 0xae, 0xb1, 0x86, 0xed, 0x35, + 0x4f, 0xfa, 0x87, 0x84, 0xba, 0x84, 0x13, 0xd6, 0x3c, 0x25, 0x6e, 0xc7, 0xa3, 0x4d, 0xc5, 0x30, + 0x7d, 0xbb, 0x67, 0x5a, 0xc7, 0xb6, 0x4b, 0xe8, 0x59, 0xd3, 0x3f, 0xe9, 0x0a, 0x02, 0x6b, 0xf6, + 0x08, 0x37, 0x9b, 0xa7, 0x9b, 0xcd, 0x2e, 0x71, 0x09, 0x35, 0x39, 0xe9, 0x34, 0x7c, 0xea, 0x71, + 0x0f, 0x7d, 0x23, 0xd0, 0x6a, 0x24, 0xb5, 0x1a, 0xfe, 0x49, 0x57, 0x10, 0x58, 0x43, 0x68, 0x35, + 0x4e, 0x37, 0xd7, 0xde, 0xe8, 0xda, 0xfc, 0xb8, 0x7f, 0xd8, 0xb0, 0xbc, 0x5e, 0xb3, 0xeb, 0x75, + 0xbd, 0xa6, 0x54, 0x3e, 0xec, 0x1f, 0xc9, 0x2f, 0xf9, 0x21, 0x7f, 0x05, 0xa0, 0x6b, 0x13, 0x5d, + 0xa1, 0x7d, 0x97, 0xdb, 0x3d, 0x32, 0xea, 0xc5, 0xda, 0x5b, 0x97, 0x29, 0x30, 0xeb, 0x98, 0xf4, + 0xcc, 0x31, 0xbd, 0x37, 0x27, 0xe9, 0xf5, 0xb9, 0xed, 0x34, 0x6d, 0x97, 0x33, 0x4e, 0x47, 0x95, + 0xf4, 0x7f, 0xcc, 0x42, 0x71, 0xbb, 0xbd, 0x77, 0x8b, 0x7a, 0x7d, 0x1f, 0xad, 0xc3, 0x9c, 0x6b, + 0xf6, 0x48, 0x55, 0x5b, 0xd7, 0x36, 0x4a, 0xc6, 0xc2, 0xb3, 0x41, 0x7d, 0x66, 0x38, 0xa8, 0xcf, + 0xdd, 0x31, 0x7b, 0x04, 0x4b, 0x0e, 0x72, 0xa0, 0x78, 0x4a, 0x28, 0xb3, 0x3d, 0x97, 0x55, 0x73, + 0xeb, 0xb3, 0x1b, 0xe5, 0xad, 0x1b, 0x8d, 0x2c, 0x9b, 0xd6, 0x90, 0x06, 0xee, 0x07, 0xaa, 0x37, + 0x3d, 0xda, 0xb2, 0x99, 0xe5, 0x9d, 0x12, 0x7a, 0x66, 0x2c, 0x2b, 0x2b, 0x45, 0xc5, 0x64, 0x38, + 0xb2, 0x80, 0x7e, 0xa3, 0xc1, 0xb2, 0x4f, 0xc9, 0x11, 0xa1, 0x94, 0x74, 0x14, 0xbf, 0x3a, 0xbb, + 0xae, 0x7d, 0x01, 0x66, 0xab, 0xca, 0xec, 0x72, 0x7b, 0x04, 0x1f, 0x8f, 0x59, 0x44, 0x7f, 0xd6, + 0x60, 0x8d, 0x11, 0x7a, 0x4a, 0xe8, 0x76, 0xa7, 0x43, 0x09, 0x63, 0xc6, 0xd9, 0x8e, 0x63, 0x13, + 0x97, 0xef, 0xec, 0xb5, 0x30, 0xab, 0xce, 0xc9, 0x7d, 0xf8, 0x51, 0x36, 0x87, 0x0e, 0x26, 0xe1, + 0x18, 0xba, 0xf2, 0x68, 0x6d, 0xa2, 0x08, 0xc3, 0x9f, 0xe1, 0x86, 0x7e, 0x04, 0x0b, 0xe1, 0x41, + 0xde, 0xb6, 0x19, 0x47, 0xf7, 0xa1, 0xd0, 0x15, 0x1f, 0xac, 0xaa, 0x49, 0x07, 0x1b, 0xd9, 0x1c, + 0x0c, 0x31, 0x8c, 0x45, 0xe5, 0x4f, 0x41, 0x7e, 0x32, 0xac, 0xd0, 0xf4, 0x4f, 0x73, 0x50, 0xde, + 0x6e, 0xef, 0x61, 0xc2, 0xbc, 0x3e, 0xb5, 0x48, 0x86, 0xa0, 0xd9, 0x02, 0x10, 0x7f, 0x99, 0x6f, + 0x5a, 0xa4, 0x53, 0xcd, 0xad, 0x6b, 0x1b, 0x45, 0x03, 0x29, 0x39, 0xb8, 0x13, 0x71, 0x70, 0x42, + 0x4a, 0xa0, 0x9e, 0xd8, 0x6e, 0x47, 0x9e, 0x76, 0x02, 0xf5, 0x6d, 0xdb, 0xed, 0x60, 0xc9, 0x41, + 0xb7, 0x21, 0x7f, 0x4a, 0xe8, 0xa1, 0xd8, 0x7f, 0x11, 0x10, 0xdf, 0xca, 0xb6, 0xbc, 0xfb, 0x42, + 0xc5, 0x28, 0x0d, 0x07, 0xf5, 0xbc, 0xfc, 0x89, 0x03, 0x10, 0xd4, 0x00, 0x60, 0xc7, 0x1e, 0xe5, + 0xd2, 0x9d, 0x6a, 0x7e, 0x7d, 0x76, 0xa3, 0x64, 0x2c, 0x0a, 0xff, 0x0e, 0x22, 0x2a, 0x4e, 0x48, + 0xa0, 0x6b, 0xb0, 0xc0, 0x6c, 0xb7, 0xdb, 0x77, 0x4c, 0x2a, 0x08, 0xd5, 0x82, 0xf4, 0x73, 0x55, + 0xf9, 0xb9, 0x70, 0x90, 0xe0, 0xe1, 0x94, 0xa4, 0xb0, 0x64, 0x99, 0x9c, 0x74, 0x3d, 0x6a, 0x13, + 0x56, 0x9d, 0x8f, 0x2d, 0xed, 0x44, 0x54, 0x9c, 0x90, 0xd0, 0xff, 0xaa, 0xc1, 0x52, 0x62, 0xbf, + 0xe5, 0xd9, 0x5e, 0x83, 0x85, 0x6e, 0x22, 0xb2, 0xd5, 0xde, 0x47, 0xd6, 0x93, 0x51, 0x8f, 0x53, + 0x92, 0x88, 0x40, 0x89, 0x2a, 0xa4, 0xf0, 0x06, 0x6f, 0x66, 0x0e, 0x8c, 0xd0, 0x87, 0xd8, 0x52, + 0x82, 0xc8, 0x70, 0x8c, 0xac, 0xff, 0x5b, 0x93, 0x41, 0x12, 0xde, 0x69, 0xb4, 0x91, 0xc8, 0x1b, + 0x9a, 0x5c, 0xf2, 0xc2, 0x84, 0x3b, 0x7f, 0xc9, 0x65, 0xcb, 0x7d, 0x29, 0x2e, 0xdb, 0xf5, 0xe2, + 0x1f, 0x3f, 0xa8, 0xcf, 0xbc, 0xff, 0xaf, 0xf5, 0x19, 0xfd, 0x93, 0x1c, 0x54, 0x5a, 0xc4, 0x21, + 0x9c, 0xdc, 0xf5, 0xb9, 0x5c, 0xc1, 0x4d, 0x40, 0x5d, 0x6a, 0x5a, 0xa4, 0x4d, 0xa8, 0xed, 0x75, + 0x0e, 0x88, 
0xe5, 0xb9, 0x1d, 0x26, 0x8f, 0x68, 0xd6, 0xf8, 0xca, 0x70, 0x50, 0x47, 0xb7, 0xc6, + 0xb8, 0xf8, 0x02, 0x0d, 0xe4, 0x40, 0xc5, 0xa7, 0xf2, 0xb7, 0xcd, 0x55, 0xc2, 0x15, 0x81, 0xfe, + 0x66, 0xb6, 0xb5, 0xb7, 0x93, 0xaa, 0xc6, 0xca, 0x70, 0x50, 0xaf, 0xa4, 0x48, 0x38, 0x0d, 0x8e, + 0x7e, 0x0c, 0xcb, 0x1e, 0xf5, 0x8f, 0x4d, 0xb7, 0x45, 0x7c, 0xe2, 0x76, 0x88, 0xcb, 0x99, 0xbc, + 0x7c, 0x45, 0x63, 0x55, 0xa4, 0xc9, 0xbb, 0x23, 0x3c, 0x3c, 0x26, 0x8d, 0x1e, 0xc0, 0x8a, 0x4f, + 0x3d, 0xdf, 0xec, 0x9a, 0x02, 0xb1, 0xed, 0x39, 0xb6, 0x75, 0x26, 0x2f, 0x67, 0xc9, 0xb8, 0x3a, + 0x1c, 0xd4, 0x57, 0xda, 0xa3, 0xcc, 0xf3, 0x41, 0xfd, 0x25, 0xb9, 0x75, 0x82, 0x12, 0x33, 0xf1, + 0x38, 0x8c, 0xbe, 0x07, 0xc5, 0x56, 0x9f, 0x4a, 0x0a, 0xfa, 0x21, 0x14, 0x3b, 0xea, 0xb7, 0xda, + 0xd5, 0x57, 0xc2, 0x1a, 0x12, 0xca, 0x9c, 0x0f, 0xea, 0x15, 0x51, 0x2a, 0x1b, 0x21, 0x01, 0x47, + 0x2a, 0xfa, 0x43, 0xa8, 0xec, 0x3e, 0xf6, 0x3d, 0xca, 0xc3, 0xf3, 0x7a, 0x0d, 0x0a, 0x44, 0x12, + 0x24, 0x5a, 0x31, 0x4e, 0x7c, 0x81, 0x18, 0x56, 0x5c, 0xf4, 0x2a, 0xe4, 0xc9, 0x63, 0xd3, 0xe2, + 0x2a, 0x83, 0x55, 0x94, 0x58, 0x7e, 0x57, 0x10, 0x71, 0xc0, 0xd3, 0x9f, 0x6a, 0x00, 0xb7, 0x48, + 0x84, 0xbd, 0x0d, 0x4b, 0xe1, 0xa5, 0x48, 0xdf, 0xd5, 0xaf, 0x2a, 0xed, 0x25, 0x9c, 0x66, 0xe3, + 0x51, 0x79, 0xd4, 0x86, 0x55, 0xdb, 0xb5, 0x9c, 0x7e, 0x87, 0xdc, 0x73, 0x6d, 0xd7, 0xe6, 0xb6, + 0xe9, 0xd8, 0xbf, 0x8a, 0xf2, 0xe8, 0xd7, 0x14, 0xce, 0xea, 0xde, 0x05, 0x32, 0xf8, 0x42, 0x4d, + 0xfd, 0x21, 0x94, 0x64, 0x86, 0x10, 0xc9, 0x54, 0xac, 0x4a, 0x26, 0x08, 0xe5, 0x57, 0xb4, 0x2a, + 0x29, 0x81, 0x03, 0x5e, 0x94, 0x8d, 0x73, 0x93, 0xb2, 0x71, 0xe2, 0x42, 0x38, 0x50, 0x09, 0x74, + 0xc3, 0x02, 0x91, 0xc9, 0xc2, 0x55, 0x28, 0x86, 0x0b, 0x57, 0x56, 0xa2, 0xc6, 0x20, 0x04, 0xc2, + 0x91, 0x44, 0xc2, 0xda, 0x31, 0xa4, 0xb2, 0x5d, 0x36, 0x63, 0xaf, 0xc3, 0xbc, 0xca, 0x37, 0xca, + 0xd6, 0x92, 0x12, 0x9b, 0x0f, 0x4f, 0x21, 0xe4, 0x27, 0x2c, 0xfd, 0x1a, 0xaa, 0x93, 0xba, 0x89, + 0x17, 0xc8, 0xc7, 0xd9, 0x5d, 0xd1, 0xff, 0xa0, 0xc1, 0x72, 0x12, 0x29, 0xfb, 0xf1, 0x65, 0x37, + 0x72, 0x79, 0xdd, 0x4d, 0xec, 0xc8, 0x9f, 0x34, 0x58, 0x4d, 0x2d, 0x6d, 0xaa, 0x13, 0x9f, 0xc2, + 0xa9, 0x64, 0x70, 0xcc, 0x4e, 0x11, 0x1c, 0x4d, 0x28, 0xef, 0x45, 0x71, 0x4f, 0x2f, 0xef, 0x54, + 0xf4, 0xbf, 0x69, 0xb0, 0x90, 0xd0, 0x60, 0xe8, 0x21, 0xcc, 0x8b, 0xfc, 0x66, 0xbb, 0x5d, 0xd5, + 0x45, 0x65, 0x2c, 0x96, 0x09, 0x90, 0x78, 0x5d, 0xed, 0x00, 0x09, 0x87, 0x90, 0xa8, 0x0d, 0x05, + 0x4a, 0x58, 0xdf, 0xe1, 0x2a, 0xb5, 0x5f, 0xcd, 0x58, 0xd6, 0xb8, 0xc9, 0xfb, 0xcc, 0x00, 0x91, + 0xa3, 0xb0, 0xd4, 0xc7, 0x0a, 0x47, 0xff, 0x67, 0x0e, 0x2a, 0xb7, 0xcd, 0x43, 0xe2, 0x1c, 0x10, + 0x87, 0x58, 0xdc, 0xa3, 0xe8, 0x3d, 0x28, 0xf7, 0x4c, 0x6e, 0x1d, 0x4b, 0x6a, 0xd8, 0x0b, 0xb6, + 0xb2, 0x19, 0x4a, 0x21, 0x35, 0xf6, 0x63, 0x98, 0x5d, 0x97, 0xd3, 0x33, 0xe3, 0x25, 0xb5, 0xb0, + 0x72, 0x82, 0x83, 0x93, 0xd6, 0x64, 0x03, 0x2f, 0xbf, 0x77, 0x1f, 0xfb, 0xa2, 0x88, 0x4e, 0xff, + 0x6e, 0x48, 0xb9, 0x80, 0xc9, 0xbb, 0x7d, 0x9b, 0x92, 0x1e, 0x71, 0x79, 0xdc, 0xc0, 0xef, 0x8f, + 0xe0, 0xe3, 0x31, 0x8b, 0x6b, 0x37, 0x60, 0x79, 0xd4, 0x79, 0xb4, 0x0c, 0xb3, 0x27, 0xe4, 0x2c, + 0x88, 0x05, 0x2c, 0x7e, 0xa2, 0x55, 0xc8, 0x9f, 0x9a, 0x4e, 0x5f, 0xe5, 0x1f, 0x1c, 0x7c, 0x5c, + 0xcf, 0x5d, 0xd3, 0xf4, 0xbf, 0x68, 0x50, 0x9d, 0xe4, 0x08, 0xfa, 0x7a, 0x02, 0xc8, 0x28, 0x2b, + 0xaf, 0x66, 0xdf, 0x26, 0x67, 0x01, 0xea, 0x2e, 0x14, 0x3d, 0x5f, 0x3c, 0xb9, 0x3c, 0xaa, 0xe2, + 0xfc, 0xf5, 0x30, 0x76, 0xef, 0x2a, 0xfa, 0xf9, 0xa0, 0x7e, 0x25, 0x05, 0x1f, 0x32, 0x70, 0xa4, + 0x8a, 0x74, 0x28, 0x48, 0x7f, 0x44, 
0x51, 0x16, 0xed, 0x93, 0x3c, 0xfc, 0xfb, 0x92, 0x82, 0x15, + 0x47, 0x7f, 0x0f, 0x8a, 0xa2, 0x3b, 0xdc, 0x27, 0xdc, 0x14, 0x57, 0x86, 0x11, 0xe7, 0xe8, 0xb6, + 0xed, 0x9e, 0x28, 0xd7, 0xa2, 0x2b, 0x73, 0xa0, 0xe8, 0x38, 0x92, 0xb8, 0xa8, 0x4c, 0xe5, 0xa6, + 0x2b, 0x53, 0xfa, 0x7f, 0x73, 0x50, 0x16, 0xd6, 0xc3, 0xca, 0xf7, 0x7d, 0xa8, 0x38, 0xc9, 0x35, + 0x29, 0x2f, 0xae, 0x28, 0xc0, 0x74, 0x94, 0xe2, 0xb4, 0xac, 0x50, 0x3e, 0xb2, 0x89, 0xd3, 0x89, + 0x94, 0x73, 0x69, 0xe5, 0x9b, 0x49, 0x26, 0x4e, 0xcb, 0x8a, 0xec, 0xf3, 0x48, 0x9c, 0xb6, 0x6a, + 0x5f, 0xa2, 0xec, 0xf3, 0x13, 0x41, 0xc4, 0x01, 0xef, 0xa2, 0x15, 0xcf, 0x4d, 0x59, 0x98, 0xaf, + 0xc3, 0xa2, 0xe8, 0x31, 0xbc, 0x3e, 0x0f, 0x7b, 0xbc, 0xbc, 0xec, 0x46, 0xd0, 0x70, 0x50, 0x5f, + 0x7c, 0x27, 0xc5, 0xc1, 0x23, 0x92, 0x13, 0x8b, 0x7a, 0xe1, 0x73, 0x17, 0xf5, 0x77, 0xa1, 0xb4, + 0x6f, 0x5b, 0xd4, 0x13, 0x86, 0x45, 0x6e, 0x65, 0xa9, 0xbe, 0x33, 0xca, 0x41, 0xa1, 0x43, 0x21, + 0x5f, 0xec, 0x96, 0x6b, 0xba, 0x5e, 0xd0, 0x5d, 0xe6, 0xe3, 0xdd, 0xba, 0x23, 0x88, 0x38, 0xe0, + 0x5d, 0x5f, 0x15, 0x29, 0xf5, 0x77, 0x4f, 0xeb, 0x33, 0x4f, 0x9e, 0xd6, 0x67, 0x3e, 0x78, 0xaa, + 0xd2, 0xeb, 0xa7, 0x00, 0x70, 0xf7, 0xf0, 0x97, 0xc4, 0x0a, 0x42, 0xee, 0xf2, 0x87, 0xa0, 0x28, + 0x93, 0x6a, 0xfe, 0x20, 0x1f, 0x4d, 0xb9, 0x91, 0x32, 0x99, 0xe0, 0xe1, 0x94, 0x24, 0x6a, 0x42, + 0x29, 0x7a, 0x1c, 0xaa, 0x12, 0xb0, 0xa2, 0xd4, 0x4a, 0xd1, 0x0b, 0x12, 0xc7, 0x32, 0xa9, 0xf8, + 0x9f, 0xbb, 0x34, 0xfe, 0x0d, 0x98, 0xed, 0xdb, 0x1d, 0x79, 0x7e, 0x25, 0xe3, 0xdb, 0xe1, 0x1d, + 0xbe, 0xb7, 0xd7, 0x3a, 0x1f, 0xd4, 0x5f, 0x99, 0x34, 0x56, 0xe1, 0x67, 0x3e, 0x61, 0x8d, 0x7b, + 0x7b, 0x2d, 0x2c, 0x94, 0x2f, 0x8a, 0xa8, 0xc2, 0x94, 0x11, 0xb5, 0x05, 0xa0, 0x56, 0x2d, 0xb4, + 0xe7, 0x83, 0x68, 0x0a, 0x1f, 0xca, 0xb7, 0x22, 0x0e, 0x4e, 0x48, 0x21, 0x06, 0x2b, 0x16, 0x25, + 0xf2, 0xb7, 0x38, 0x7a, 0xc6, 0xcd, 0x9e, 0x5f, 0x2d, 0xca, 0x72, 0xf2, 0xcd, 0x6c, 0x29, 0x56, + 0xa8, 0x19, 0x2f, 0x2b, 0x33, 0x2b, 0x3b, 0xa3, 0x60, 0x78, 0x1c, 0x1f, 0x79, 0xb0, 0xd2, 0x51, + 0x8d, 0x7b, 0x6c, 0xb4, 0x34, 0xb5, 0xd1, 0x2b, 0xc2, 0x60, 0x6b, 0x14, 0x08, 0x8f, 0x63, 0xa3, + 0x9f, 0xc3, 0x5a, 0x48, 0x1c, 0x7f, 0x3d, 0x55, 0x41, 0xee, 0x54, 0x4d, 0xbc, 0xe7, 0x5a, 0x13, + 0xa5, 0xf0, 0x67, 0x20, 0xa0, 0x0e, 0x14, 0x9c, 0xa0, 0x40, 0x96, 0x65, 0x75, 0xfa, 0x41, 0xb6, + 0x55, 0xc4, 0xd1, 0xdf, 0x48, 0x16, 0xc6, 0xe8, 0x05, 0xa1, 0x6a, 0xa2, 0xc2, 0x46, 0x8f, 0xa1, + 0x6c, 0xba, 0xae, 0xc7, 0xcd, 0xe0, 0x3d, 0xb7, 0x20, 0x4d, 0x6d, 0x4f, 0x6d, 0x6a, 0x3b, 0xc6, + 0x18, 0x29, 0xc4, 0x09, 0x0e, 0x4e, 0x9a, 0x42, 0x8f, 0x60, 0xc9, 0x7b, 0xe4, 0x12, 0x8a, 0xc9, + 0x11, 0xa1, 0xc4, 0x15, 0x8f, 0xff, 0x8a, 0xb4, 0xfe, 0x9d, 0x8c, 0xd6, 0x53, 0xca, 0x71, 0x48, + 0xa7, 0xe9, 0x0c, 0x8f, 0x5a, 0x41, 0x0d, 0x80, 0x23, 0xdb, 0x55, 0xed, 0x54, 0x75, 0x31, 0x9e, + 0x76, 0xdc, 0x8c, 0xa8, 0x38, 0x21, 0x81, 0xbe, 0x0b, 0x65, 0xcb, 0xe9, 0x33, 0x4e, 0x82, 0xb1, + 0xca, 0x92, 0xbc, 0x41, 0xd1, 0xfa, 0x76, 0x62, 0x16, 0x4e, 0xca, 0xa1, 0x63, 0x58, 0xb0, 0x13, + 0x7d, 0x5b, 0x75, 0x59, 0xc6, 0xe2, 0xd6, 0xd4, 0xcd, 0x1a, 0x33, 0x96, 0x45, 0x26, 0x4a, 0x52, + 0x70, 0x0a, 0x79, 0xed, 0x7b, 0x50, 0xfe, 0x9c, 0x6d, 0x84, 0x68, 0x43, 0x46, 0x8f, 0x6e, 0xaa, + 0x36, 0xe4, 0xef, 0x39, 0x58, 0x4c, 0x6f, 0x78, 0xd4, 0xae, 0x6b, 0x13, 0xc7, 0x64, 0x61, 0x56, + 0x9e, 0x9d, 0x98, 0x95, 0x55, 0xf2, 0x9b, 0x7b, 0x91, 0xe4, 0xb7, 0x05, 0x60, 0xfa, 0x76, 0x98, + 0xf7, 0x82, 0x3c, 0x1a, 0x65, 0xae, 0x78, 0x10, 0x84, 0x13, 0x52, 0x72, 0x10, 0xe6, 0xb9, 0x9c, + 0x7a, 0x8e, 0x43, 0xa8, 0xaa, 0x7c, 0xc1, 0x20, 0x2c, 0xa2, 
0xe2, 0x84, 0x04, 0xba, 0x09, 0xe8, + 0xd0, 0xf1, 0xac, 0x13, 0xb9, 0x05, 0xe1, 0x3d, 0x97, 0x59, 0xb2, 0x18, 0xcc, 0x55, 0x8c, 0x31, + 0x2e, 0xbe, 0x40, 0x43, 0xbf, 0x0b, 0xe9, 0x49, 0x08, 0xba, 0x11, 0x6c, 0x80, 0x16, 0x8d, 0x2a, + 0xa6, 0x5b, 0xbc, 0x7e, 0x15, 0x4a, 0xd8, 0xf3, 0x78, 0xdb, 0xe4, 0xc7, 0x0c, 0xd5, 0x21, 0xef, + 0x8b, 0x1f, 0x6a, 0xcc, 0x25, 0x27, 0x8d, 0x92, 0x83, 0x03, 0xba, 0xfe, 0x7b, 0x0d, 0x5e, 0x9e, + 0x38, 0x75, 0x12, 0x1b, 0x69, 0x45, 0x5f, 0xca, 0xa5, 0x68, 0x23, 0x63, 0x39, 0x9c, 0x90, 0x12, + 0xdd, 0x52, 0x6a, 0x54, 0x35, 0xda, 0x2d, 0xa5, 0xac, 0xe1, 0xb4, 0xac, 0xfe, 0x9f, 0x1c, 0x14, + 0x82, 0x07, 0x05, 0x7a, 0x08, 0x45, 0x71, 0x25, 0x3a, 0x26, 0x37, 0xa5, 0xe5, 0xcc, 0x33, 0xe3, + 0xb0, 0xeb, 0x8c, 0x6b, 0x6c, 0x48, 0xc1, 0x11, 0x22, 0x7a, 0x0d, 0x0a, 0x4c, 0xda, 0x51, 0xee, + 0x45, 0x49, 0x32, 0xb0, 0x8e, 0x15, 0x57, 0xf4, 0x2e, 0x3d, 0xc2, 0x98, 0xd9, 0x0d, 0x63, 0x36, + 0xea, 0x5d, 0xf6, 0x03, 0x32, 0x0e, 0xf9, 0xe8, 0x2d, 0xf1, 0x7e, 0x32, 0x59, 0xd4, 0xbb, 0xd5, + 0x42, 0x48, 0x2c, 0xa9, 0xe7, 0x83, 0xfa, 0x82, 0x02, 0x97, 0xdf, 0x58, 0x49, 0xa3, 0x07, 0x30, + 0xdf, 0x21, 0xdc, 0xb4, 0x9d, 0xa0, 0x65, 0xcb, 0x3c, 0x53, 0x0b, 0xc0, 0x5a, 0x81, 0xaa, 0x51, + 0x16, 0x3e, 0xa9, 0x0f, 0x1c, 0x02, 0x8a, 0xfb, 0x66, 0x79, 0x9d, 0x60, 0x20, 0x9c, 0x8f, 0xef, + 0xdb, 0x8e, 0xd7, 0x21, 0x58, 0x72, 0xf4, 0x27, 0x1a, 0x94, 0x03, 0xa4, 0x1d, 0xb3, 0xcf, 0x08, + 0xda, 0x8c, 0x56, 0x11, 0x1c, 0x77, 0x58, 0x8a, 0xe7, 0xde, 0x39, 0xf3, 0xc9, 0xf9, 0xa0, 0x5e, + 0x92, 0x62, 0xe2, 0x23, 0x5a, 0x40, 0x62, 0x8f, 0x72, 0x97, 0xec, 0xd1, 0xab, 0x90, 0x97, 0xed, + 0xb1, 0xda, 0xcc, 0xa8, 0xbf, 0x93, 0x2d, 0x34, 0x0e, 0x78, 0xfa, 0xc7, 0x39, 0xa8, 0xa4, 0x16, + 0x97, 0xa1, 0x99, 0x8b, 0x1e, 0xf9, 0xb9, 0x0c, 0x83, 0xa3, 0xc9, 0x63, 0xfc, 0x9f, 0x42, 0xc1, + 0x12, 0xeb, 0x0b, 0xff, 0x8f, 0xb2, 0x39, 0xcd, 0x51, 0xc8, 0x9d, 0x89, 0x23, 0x49, 0x7e, 0x32, + 0xac, 0x00, 0xd1, 0x2d, 0x58, 0xa1, 0x84, 0xd3, 0xb3, 0xed, 0x23, 0x4e, 0x68, 0xb2, 0x47, 0xcf, + 0xc7, 0xed, 0x0e, 0x1e, 0x15, 0xc0, 0xe3, 0x3a, 0x61, 0x86, 0x2c, 0xbc, 0x40, 0x86, 0xd4, 0x1d, + 0x98, 0xfb, 0x3f, 0xb6, 0xe6, 0x3f, 0x83, 0x52, 0xdc, 0x3c, 0x7d, 0xc1, 0x26, 0xf5, 0x5f, 0x40, + 0x51, 0x44, 0x63, 0xd8, 0xf4, 0x5f, 0x52, 0x80, 0xd2, 0xa5, 0x21, 0x97, 0xa5, 0x34, 0xe8, 0x5b, + 0x10, 0xfc, 0x77, 0x46, 0x64, 0x53, 0x9b, 0x93, 0x5e, 0x2a, 0x9b, 0xee, 0x09, 0x02, 0x0e, 0xe8, + 0x89, 0x61, 0xcf, 0x6f, 0x35, 0x00, 0xf9, 0xc4, 0xdb, 0x3d, 0x15, 0xcf, 0xf2, 0x75, 0x98, 0x13, + 0x27, 0x30, 0xea, 0x98, 0xbc, 0x46, 0x92, 0x83, 0xee, 0x41, 0xc1, 0x93, 0x4d, 0x95, 0x9a, 0xbe, + 0xbc, 0x31, 0x31, 0xf2, 0xd4, 0x3f, 0x5e, 0x1b, 0xd8, 0x7c, 0xb4, 0xfb, 0x98, 0x13, 0x57, 0xf8, + 0x18, 0x47, 0x5d, 0xd0, 0x99, 0x61, 0x05, 0x66, 0x6c, 0x3c, 0x7b, 0x5e, 0x9b, 0xf9, 0xf0, 0x79, + 0x6d, 0xe6, 0xa3, 0xe7, 0xb5, 0x99, 0xf7, 0x87, 0x35, 0xed, 0xd9, 0xb0, 0xa6, 0x7d, 0x38, 0xac, + 0x69, 0x1f, 0x0d, 0x6b, 0xda, 0xc7, 0xc3, 0x9a, 0xf6, 0xe4, 0x93, 0xda, 0xcc, 0x83, 0xdc, 0xe9, + 0xe6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x80, 0x43, 0x11, 0x41, 0xbe, 0x1e, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto new file mode 100644 index 000000000..a6be57a87 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -0,0 +1,764 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.apis.meta.v1; + +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +message APIGroup { + // name is the name of the group. + optional string name = 1; + + // versions are the versions supported in this group. + repeated GroupVersionForDiscovery versions = 2; + + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + // +optional + optional GroupVersionForDiscovery preferredVersion = 3; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; +} + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +message APIGroupList { + // groups is a list of APIGroup. + repeated APIGroup groups = 1; +} + +// APIResource specifies the name of a resource and whether it is namespaced. +message APIResource { + // name is the plural name of the resource. + optional string name = 1; + + // singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // The singularName is more correct for reporting status on a single item and both singular and plural are allowed + // from the kubectl CLI interface. + optional string singularName = 6; + + // namespaced indicates if a resource is namespaced or not. + optional bool namespaced = 2; + + // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') + optional string kind = 3; + + // verbs is a list of supported kube verbs (this includes get, list, watch, create, + // update, patch, delete, deletecollection, and proxy) + optional Verbs verbs = 4; + + // shortNames is a list of suggested short names of the resource. + repeated string shortNames = 5; + + // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + repeated string categories = 7; +} + +// APIResourceList is a list of APIResource, it is used to expose the name of the +// resources supported in a specific group and version, and if the resource +// is namespaced. 
+message APIResourceList { + // groupVersion is the group and version this APIResourceList is for. + optional string groupVersion = 1; + + // resources contains the name of the resources and if they are namespaced. + repeated APIResource resources = 2; +} + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message APIVersions { + // versions are the api versions that are available. + repeated string versions = 1; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; +} + +// DeleteOptions may be provided when deleting an API object. +message DeleteOptions { + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + optional int64 gracePeriodSeconds = 1; + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + optional Preconditions preconditions = 2; + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + optional bool orphanDependents = 3; + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + optional string propagationPolicy = 4; +} + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +message Duration { + optional int64 duration = 1; +} + +// ExportOptions is the query options to the standard REST get call. +message ExportOptions { + // Should this value be exported. Export strips fields that a user can not specify. + optional bool export = 1; + + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + optional bool exact = 2; +} + +// GetOptions is the standard query options to the standard REST get call. 
+message GetOptions { + // When specified: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + optional string resourceVersion = 1; + + // If true, partially initialized resources are included in the response. + // +optional + optional bool includeUninitialized = 2; +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupKind { + optional string group = 1; + + optional string kind = 2; +} + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupResource { + optional string group = 1; + + optional string resource = 2; +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersion { + optional string group = 1; + + optional string version = 2; +} + +// GroupVersion contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +message GroupVersionForDiscovery { + // groupVersion specifies the API group and version in the form "group/version" + optional string groupVersion = 1; + + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + optional string version = 2; +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionKind { + optional string group = 1; + + optional string version = 2; + + optional string kind = 3; +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionResource { + optional string group = 1; + + optional string version = 2; + + optional string resource = 3; +} + +// Initializer is information about an initializer that has not yet completed. +message Initializer { + // name of the process that is responsible for initializing this object. + optional string name = 1; +} + +// Initializers tracks the progress of initialization. +message Initializers { + // Pending is a list of initializers that must execute in order before this object is visible. + // When the last pending initializer is removed, and no failing result is set, the initializers + // struct will be set to nil and the object is considered as initialized and visible to all + // clients. + repeated Initializer pending = 1; + + // If result is set with the Failure field, the object will be persisted to storage and then deleted, + // ensuring that other clients can observe the deletion. + optional Status result = 2; +} + +// A label selector is a label query over a set of resources. 
The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +message LabelSelector { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + map matchLabels = 1; + + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + // +optional + repeated LabelSelectorRequirement matchExpressions = 2; +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message LabelSelectorRequirement { + // key is the label key that the selector applies to. + // +patchMergeKey=key + // +patchStrategy=merge + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators ard In, NotIn, Exists and DoesNotExist. + optional string operator = 2; + + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + repeated string values = 3; +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +message ListMeta { + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + optional string selfLink = 1; + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 2; +} + +// ListOptions is the query options to a standard REST list call. +message ListOptions { + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + optional string labelSelector = 1; + + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + optional string fieldSelector = 2; + + // If true, partially initialized resources are included in the response. + // +optional + optional bool includeUninitialized = 6; + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + optional bool watch = 3; + + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + optional string resourceVersion = 4; + + // Timeout for the list/watch call. 
+ // +optional + optional int64 timeoutSeconds = 5; +} + +// MicroTime is version of Time with microsecond level precision. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +message MicroTime { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +message ObjectMeta { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + optional string name = 1; + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // +optional + optional string generateName = 2; + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + optional string namespace = 3; + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + optional string selfLink = 4; + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 5; + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. 
May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + optional int64 generation = 7; + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional Time creationTimestamp = 8; + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional Time deletionTimestamp = 9; + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + optional int64 deletionGracePeriodSeconds = 10; + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + map labels = 11; + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + map annotations = 12; + + // List of objects depended by this object. 
If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + repeated OwnerReference ownerReferences = 13; + + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + optional Initializers initializers = 16; + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + // +patchStrategy=merge + repeated string finalizers = 14; + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + optional string clusterName = 15; +} + +// OwnerReference contains enough information to let you identify an owning +// object. Currently, an owning object must be in the same namespace, so there +// is no namespace field. +message OwnerReference { + // API version of the referent. + optional string apiVersion = 5; + + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + optional string kind = 1; + + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 3; + + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + optional string uid = 4; + + // If true, this reference points to the managing controller. + // +optional + optional bool controller = 6; + + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. + // +optional + optional bool blockOwnerDeletion = 7; +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +message Preconditions { + // Specifies the target UID. + // +optional + optional string uid = 1; +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +message RootPaths { + // paths are the paths available at root. + repeated string paths = 1; +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. 
+message ServerAddressByClientCIDR { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + optional string clientCIDR = 1; + + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + optional string serverAddress = 2; +} + +// Status is a return value for calls that don't return other objects. +message Status { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional string status = 2; + + // A human-readable description of the status of this operation. + // +optional + optional string message = 3; + + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + // +optional + optional string reason = 4; + + // Extended data associated with the reason. Each reason may define its + // own extended details. This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + // +optional + optional StatusDetails details = 5; + + // Suggested HTTP return code for this status, 0 if not set. + // +optional + optional int32 code = 6; +} + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered. +message StatusCause { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + // +optional + optional string reason = 1; + + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + // +optional + optional string message = 2; + + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. + // + // Examples: + // "name" - the field "name" on the current resource + // "items[0].name" - the field "name" on the first array entry in "items" + // +optional + optional string field = 3; +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under +// defined. +message StatusDetails { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). + // +optional + optional string name = 1; + + // The group attribute of the resource associated with the status StatusReason. + // +optional + optional string group = 2; + + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 3; + + // UID of the resource. + // (when there is a single resource which can be described). + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 6; + + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. + // +optional + repeated StatusCause causes = 4; + + // If specified, the time in seconds before the operation should be retried. + // +optional + optional int32 retryAfterSeconds = 5; +} + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Time { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +message TypeMeta { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 1; + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // +optional + optional string apiVersion = 2; +} + +// Verbs masks the value so protobuf can generate +// +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Verbs { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Event represents a single event to a watched resource. 
+// +// +protobuf=true +message WatchEvent { + optional string type = 1; + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *Status is recommended; other types may make sense + // depending on context. + optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go new file mode 100644 index 000000000..bd4c6d9b5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -0,0 +1,148 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupResource struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` +} + +func (gr *GroupResource) String() string { + if len(gr.Group) == 0 { + return gr.Resource + } + return gr.Resource + "." + gr.Group +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersionResource struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` + Resource string `json:"resource" protobuf:"bytes,3,opt,name=resource"` +} + +func (gvr *GroupVersionResource) String() string { + return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupKind struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` +} + +func (gk *GroupKind) String() string { + if len(gk.Group) == 0 { + return gk.Kind + } + return gk.Kind + "." + gk.Group +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. 
It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersionKind struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` + Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` +} + +func (gvk GroupVersionKind) String() string { + return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersion struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` +} + +// Empty returns true if group and version are empty +func (gv GroupVersion) Empty() bool { + return len(gv.Group) == 0 && len(gv.Version) == 0 +} + +// String puts "group" and "version" into a single "group/version" string. For the legacy v1 +// it returns "v1". +func (gv GroupVersion) String() string { + // special case the internal apiVersion for the legacy kube types + if gv.Empty() { + return "" + } + + // special case of "v1" for backward compatibility + if len(gv.Group) == 0 && gv.Version == "v1" { + return gv.Version + } + if len(gv.Group) > 0 { + return gv.Group + "/" + gv.Version + } + return gv.Version +} + +// MarshalJSON implements the json.Marshaller interface. +func (gv GroupVersion) MarshalJSON() ([]byte, error) { + s := gv.String() + if strings.Count(s, "/") > 1 { + return []byte{}, fmt.Errorf("illegal GroupVersion %v: contains more than one /", s) + } + return json.Marshal(s) +} + +func (gv *GroupVersion) unmarshal(value []byte) error { + var s string + if err := json.Unmarshal(value, &s); err != nil { + return err + } + parsed, err := schema.ParseGroupVersion(s) + if err != nil { + return err + } + gv.Group, gv.Version = parsed.Group, parsed.Version + return nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (gv *GroupVersion) UnmarshalJSON(value []byte) error { + return gv.unmarshal(value) +} + +// UnmarshalTEXT implements the Ugorji's encoding.TextUnmarshaler interface. +func (gv *GroupVersion) UnmarshalText(value []byte) error { + return gv.unmarshal(value) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go new file mode 100644 index 000000000..d845d7b0f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -0,0 +1,234 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" +) + +// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements +// labels.Selector +// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go +func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { + if ps == nil { + return labels.Nothing(), nil + } + if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 { + return labels.Everything(), nil + } + selector := labels.NewSelector() + for k, v := range ps.MatchLabels { + r, err := labels.NewRequirement(k, selection.Equals, []string{v}) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + for _, expr := range ps.MatchExpressions { + var op selection.Operator + switch expr.Operator { + case LabelSelectorOpIn: + op = selection.In + case LabelSelectorOpNotIn: + op = selection.NotIn + case LabelSelectorOpExists: + op = selection.Exists + case LabelSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...)) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// LabelSelectorAsMap converts the LabelSelector api type into a map of strings, ie. the +// original structure of a label selector. Operators that cannot be converted into plain +// labels (Exists, DoesNotExist, NotIn, and In with more than one value) will result in +// an error. +func LabelSelectorAsMap(ps *LabelSelector) (map[string]string, error) { + if ps == nil { + return nil, nil + } + selector := map[string]string{} + for k, v := range ps.MatchLabels { + selector[k] = v + } + for _, expr := range ps.MatchExpressions { + switch expr.Operator { + case LabelSelectorOpIn: + if len(expr.Values) != 1 { + return selector, fmt.Errorf("operator %q without a single value cannot be converted into the old label selector format", expr.Operator) + } + // Should we do anything in case this will override a previous key-value pair? + selector[expr.Key] = expr.Values[0] + case LabelSelectorOpNotIn, LabelSelectorOpExists, LabelSelectorOpDoesNotExist: + return selector, fmt.Errorf("operator %q cannot be converted into the old label selector format", expr.Operator) + default: + return selector, fmt.Errorf("%q is not a valid selector operator", expr.Operator) + } + } + return selector, nil +} + +// ParseToLabelSelector parses a string representing a selector into a LabelSelector object. 
+// Note: This function should be kept in sync with the parser in pkg/labels/selector.go +func ParseToLabelSelector(selector string) (*LabelSelector, error) { + reqs, err := labels.ParseToRequirements(selector) + if err != nil { + return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %v", selector, err) + } + + labelSelector := &LabelSelector{ + MatchLabels: map[string]string{}, + MatchExpressions: []LabelSelectorRequirement{}, + } + for _, req := range reqs { + var op LabelSelectorOperator + switch req.Operator() { + case selection.Equals, selection.DoubleEquals: + vals := req.Values() + if vals.Len() != 1 { + return nil, fmt.Errorf("equals operator must have exactly one value") + } + val, ok := vals.PopAny() + if !ok { + return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved") + } + labelSelector.MatchLabels[req.Key()] = val + continue + case selection.In: + op = LabelSelectorOpIn + case selection.NotIn: + op = LabelSelectorOpNotIn + case selection.Exists: + op = LabelSelectorOpExists + case selection.DoesNotExist: + op = LabelSelectorOpDoesNotExist + case selection.GreaterThan, selection.LessThan: + // Adding a separate case for these operators to indicate that this is deliberate + return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator()) + default: + return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator()) + } + labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{ + Key: req.Key(), + Operator: op, + Values: req.Values().List(), + }) + } + return labelSelector, nil +} + +// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object. +func SetAsLabelSelector(ls labels.Set) *LabelSelector { + if ls == nil { + return nil + } + + selector := &LabelSelector{ + MatchLabels: make(map[string]string), + } + for label, value := range ls { + selector.MatchLabels[label] = value + } + + return selector +} + +// FormatLabelSelector convert labelSelector into plain string +func FormatLabelSelector(labelSelector *LabelSelector) string { + selector, err := LabelSelectorAsSelector(labelSelector) + if err != nil { + return "" + } + + l := selector.String() + if len(l) == 0 { + l = "" + } + return l +} + +func ExtractGroupVersions(l *APIGroupList) []string { + var groupVersions []string + for _, g := range l.Groups { + for _, gv := range g.Versions { + groupVersions = append(groupVersions, gv.GroupVersion) + } + } + return groupVersions +} + +// HasAnnotation returns a bool if passed in annotation exists +func HasAnnotation(obj ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +// SetMetaDataAnnotation sets the annotation and value +func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +// SingleObject returns a ListOptions for watching a single object. +func SingleObject(meta ObjectMeta) ListOptions { + return ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name).String(), + ResourceVersion: meta.ResourceVersion, + } +} + +// NewDeleteOptions returns a DeleteOptions indicating the resource should +// be deleted within the specified grace period. Use zero to indicate +// immediate deletion. If you would prefer to use the default grace period, +// use &metav1.DeleteOptions{} directly. 
+func NewDeleteOptions(grace int64) *DeleteOptions { + return &DeleteOptions{GracePeriodSeconds: &grace} +} + +// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. +func NewPreconditionDeleteOptions(uid string) *DeleteOptions { + u := types.UID(uid) + p := Preconditions{UID: &u} + return &DeleteOptions{Preconditions: &p} +} + +// NewUIDPreconditions returns a Preconditions with UID set. +func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + +// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. +func HasObjectMetaSystemFieldValues(meta Object) bool { + return !meta.GetCreationTimestamp().Time.IsZero() || + len(meta.GetUID()) != 0 +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go new file mode 100644 index 000000000..8b4c0423e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go @@ -0,0 +1,75 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// Clones the given selector and returns a new selector with the given key and value added. +// Returns the given selector, if labelKey is empty. +func CloneSelectorAndAddLabel(selector *LabelSelector, labelKey, labelValue string) *LabelSelector { + if labelKey == "" { + // Don't need to add a label. + return selector + } + + // Clone. + newSelector := new(LabelSelector) + + // TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here. + newSelector.MatchLabels = make(map[string]string) + if selector.MatchLabels != nil { + for key, val := range selector.MatchLabels { + newSelector.MatchLabels[key] = val + } + } + newSelector.MatchLabels[labelKey] = labelValue + + if selector.MatchExpressions != nil { + newMExps := make([]LabelSelectorRequirement, len(selector.MatchExpressions)) + for i, me := range selector.MatchExpressions { + newMExps[i].Key = me.Key + newMExps[i].Operator = me.Operator + if me.Values != nil { + newMExps[i].Values = make([]string, len(me.Values)) + copy(newMExps[i].Values, me.Values) + } else { + newMExps[i].Values = nil + } + } + newSelector.MatchExpressions = newMExps + } else { + newSelector.MatchExpressions = nil + } + + return newSelector +} + +// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels. +func AddLabelToSelector(selector *LabelSelector, labelKey, labelValue string) *LabelSelector { + if labelKey == "" { + // Don't need to add a label. 
+ return selector + } + if selector.MatchLabels == nil { + selector.MatchLabels = make(map[string]string) + } + selector.MatchLabels[labelKey] = labelValue + return selector +} + +// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels +func SelectorHasLabel(selector *LabelSelector, labelKey string) bool { + return len(selector.MatchLabels[labelKey]) > 0 +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go new file mode 100644 index 000000000..0ee7d99ca --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -0,0 +1,201 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// TODO: move this, Object, List, and Type to a different package +type ObjectMetaAccessor interface { + GetObjectMeta() Object +} + +// Object lets you work with object metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field (Name, UID, Namespace on lists) will be a no-op and return +// a default value. +type Object interface { + GetNamespace() string + SetNamespace(namespace string) + GetName() string + SetName(name string) + GetGenerateName() string + SetGenerateName(name string) + GetUID() types.UID + SetUID(uid types.UID) + GetResourceVersion() string + SetResourceVersion(version string) + GetGeneration() int64 + SetGeneration(generation int64) + GetSelfLink() string + SetSelfLink(selfLink string) + GetCreationTimestamp() Time + SetCreationTimestamp(timestamp Time) + GetDeletionTimestamp() *Time + SetDeletionTimestamp(timestamp *Time) + GetDeletionGracePeriodSeconds() *int64 + SetDeletionGracePeriodSeconds(*int64) + GetLabels() map[string]string + SetLabels(labels map[string]string) + GetAnnotations() map[string]string + SetAnnotations(annotations map[string]string) + GetInitializers() *Initializers + SetInitializers(initializers *Initializers) + GetFinalizers() []string + SetFinalizers(finalizers []string) + GetOwnerReferences() []OwnerReference + SetOwnerReferences([]OwnerReference) + GetClusterName() string + SetClusterName(clusterName string) +} + +// ListMetaAccessor retrieves the list interface from an object +type ListMetaAccessor interface { + GetListMeta() List +} + +// List lets you work with list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type List interface { + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) +} + +// Type exposes the type and APIVersion of versioned or internal API objects. 
+// TODO: move this, and TypeMeta and ListMeta, to a different package +type Type interface { + GetAPIVersion() string + SetAPIVersion(version string) + GetKind() string + SetKind(kind string) +} + +func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } + +func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } + +// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ListMeta) GetListMeta() List { return obj } + +func (obj *ObjectMeta) GetObjectMeta() Object { return obj } + +// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows +// fast, direct access to metadata fields for API objects. +func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetGeneration() int64 { return meta.Generation } +func (meta *ObjectMeta) SetGeneration(generation int64) { meta.Generation = generation } +func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetCreationTimestamp() Time { return meta.CreationTimestamp } +func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp Time) { + meta.CreationTimestamp = creationTimestamp +} +func (meta *ObjectMeta) GetDeletionTimestamp() *Time { return meta.DeletionTimestamp } +func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *Time) { + meta.DeletionTimestamp = deletionTimestamp +} +func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 { return meta.DeletionGracePeriodSeconds } +func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { + meta.DeletionGracePeriodSeconds = deletionGracePeriodSeconds +} +func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } +func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } +func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } +func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetInitializers() *Initializers { return meta.Initializers } 
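// Editorial sketch only, not part of the vendored meta.go: the accessor
// methods above and below are what allow generic client/controller code to
// manipulate any API object through the metav1.Object interface without
// knowing its concrete type. The addLabel helper and the label key used here
// are hypothetical names chosen purely for illustration.
func addLabel(obj Object, key, value string) {
	// Copy-on-read is not needed here; GetLabels returns the live map for
	// ObjectMeta, which may be nil on a freshly constructed object.
	labels := obj.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}
	labels[key] = value
	obj.SetLabels(labels)
}
// Because *ObjectMeta implements Object via the methods in this file, a call
// such as addLabel(&pod.ObjectMeta, "example.com/managed-by", "cri-containerd")
// would work for any typed API object that embeds ObjectMeta.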
+func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers } +func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } +func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } + +func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { + if meta.OwnerReferences == nil { + return nil + } + ret := make([]OwnerReference, len(meta.OwnerReferences)) + for i := 0; i < len(meta.OwnerReferences); i++ { + ret[i].Kind = meta.OwnerReferences[i].Kind + ret[i].Name = meta.OwnerReferences[i].Name + ret[i].UID = meta.OwnerReferences[i].UID + ret[i].APIVersion = meta.OwnerReferences[i].APIVersion + if meta.OwnerReferences[i].Controller != nil { + value := *meta.OwnerReferences[i].Controller + ret[i].Controller = &value + } + if meta.OwnerReferences[i].BlockOwnerDeletion != nil { + value := *meta.OwnerReferences[i].BlockOwnerDeletion + ret[i].BlockOwnerDeletion = &value + } + } + return ret +} + +func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { + if references == nil { + meta.OwnerReferences = nil + return + } + newReferences := make([]OwnerReference, len(references)) + for i := 0; i < len(references); i++ { + newReferences[i].Kind = references[i].Kind + newReferences[i].Name = references[i].Name + newReferences[i].UID = references[i].UID + newReferences[i].APIVersion = references[i].APIVersion + if references[i].Controller != nil { + value := *references[i].Controller + newReferences[i].Controller = &value + } + if references[i].BlockOwnerDeletion != nil { + value := *references[i].BlockOwnerDeletion + newReferences[i].BlockOwnerDeletion = &value + } + } + meta.OwnerReferences = newReferences +} + +func (meta *ObjectMeta) GetClusterName() string { + return meta.ClusterName +} +func (meta *ObjectMeta) SetClusterName(clusterName string) { + meta.ClusterName = clusterName +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go new file mode 100644 index 000000000..d55f446b0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -0,0 +1,184 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" + + "k8s.io/apimachinery/pkg/openapi" + + "github.com/go-openapi/spec" + "github.com/google/gofuzz" +) + +const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" + +// MicroTime is version of Time with microsecond level precision. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +type MicroTime struct { + time.Time `protobuf:"-"` +} + +// DeepCopy returns a deep-copy of the MicroTime value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. 
+func (t MicroTime) DeepCopy() MicroTime { + return t +} + +// String returns the representation of the time. +func (t MicroTime) String() string { + return t.Time.String() +} + +// NewMicroTime returns a wrapped instance of the provided time +func NewMicroTime(time time.Time) MicroTime { + return MicroTime{time} +} + +// DateMicro returns the MicroTime corresponding to the supplied parameters +// by wrapping time.Date. +func DateMicro(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) MicroTime { + return MicroTime{time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +// NowMicro returns the current local time. +func NowMicro() MicroTime { + return MicroTime{time.Now()} +} + +// IsZero returns true if the value is nil or time is zero. +func (t *MicroTime) IsZero() bool { + if t == nil { + return true + } + return t.Time.IsZero() +} + +// Before reports whether the time instant t is before u. +func (t MicroTime) Before(u MicroTime) bool { + return t.Time.Before(u.Time) +} + +// Equal reports whether the time instant t is equal to u. +func (t MicroTime) Equal(u MicroTime) bool { + return t.Time.Equal(u.Time) +} + +// BeforeTime reports whether the time instant t is before second-lever precision u. +func (t MicroTime) BeforeTime(u Time) bool { + return t.Time.Before(u.Time) +} + +// EqualTime reports whether the time instant t is equal to second-lever precision u. +func (t MicroTime) EqualTime(u Time) bool { + return t.Time.Equal(u.Time) +} + +// UnixMicro returns the local time corresponding to the given Unix time +// by wrapping time.Unix. +func UnixMicro(sec int64, nsec int64) MicroTime { + return MicroTime{time.Unix(sec, nsec)} +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (t *MicroTime) UnmarshalJSON(b []byte) error { + if len(b) == 4 && string(b) == "null" { + t.Time = time.Time{} + return nil + } + + var str string + json.Unmarshal(b, &str) + + pt, err := time.Parse(RFC3339Micro, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// UnmarshalQueryParameter converts from a URL query parameter value to an object +func (t *MicroTime) UnmarshalQueryParameter(str string) error { + if len(str) == 0 { + t.Time = time.Time{} + return nil + } + // Tolerate requests from older clients that used JSON serialization to build query params + if len(str) == 4 && str == "null" { + t.Time = time.Time{} + return nil + } + + pt, err := time.Parse(RFC3339Micro, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (t MicroTime) MarshalJSON() ([]byte, error) { + if t.IsZero() { + // Encode unset/nil objects as JSON's "null". + return []byte("null"), nil + } + + return json.Marshal(t.UTC().Format(RFC3339Micro)) +} + +func (_ MicroTime) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "date-time", + }, + }, + } +} + +// MarshalQueryParameter converts to a URL query parameter value +func (t MicroTime) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(RFC3339Micro), nil +} + +// Fuzz satisfies fuzz.Interface. +func (t *MicroTime) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. 
Leave off nanoseconds + // because JSON doesn't represent them so they can't round-trip + // properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60*1000*1000), 0) +} + +var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go new file mode 100644 index 000000000..14841be51 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" +) + +// Timestamp is declared in time_proto.go + +// Timestamp returns the Time as a new Timestamp value. +func (m *MicroTime) ProtoMicroTime() *Timestamp { + if m == nil { + return &Timestamp{} + } + return &Timestamp{ + Seconds: m.Time.Unix(), + Nanos: int32(m.Time.Nanosecond()), + } +} + +// Size implements the protobuf marshalling interface. +func (m *MicroTime) Size() (n int) { + if m == nil || m.Time.IsZero() { + return 0 + } + return m.ProtoMicroTime().Size() +} + +// Reset implements the protobuf marshalling interface. +func (m *MicroTime) Unmarshal(data []byte) error { + if len(data) == 0 { + m.Time = time.Time{} + return nil + } + p := Timestamp{} + if err := p.Unmarshal(data); err != nil { + return err + } + m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + return nil +} + +// Marshal implements the protobuf marshalling interface. +func (m *MicroTime) Marshal() (data []byte, err error) { + if m == nil || m.Time.IsZero() { + return nil, nil + } + return m.ProtoMicroTime().Marshal() +} + +// MarshalTo implements the protobuf marshalling interface. +func (m *MicroTime) MarshalTo(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoMicroTime().MarshalTo(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go new file mode 100644 index 000000000..6e449a436 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -0,0 +1,95 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. 
+const GroupName = "meta.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Unversioned is group version for unversioned API objects +// TODO: this should be v1 probably +var Unversioned = schema.GroupVersion{Group: "", Version: "v1"} + +// WatchEventKind is name reserved for serializing watch events. +const WatchEventKind = "WatchEvent" + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// AddToGroupVersion registers common meta types into schemas. +func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) { + scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{}) + scheme.AddKnownTypeWithName( + schema.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(WatchEventKind), + &InternalEvent{}, + ) + // Supports legacy code paths, most callers should use metav1.ParameterCodec for now + scheme.AddKnownTypes(groupVersion, + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + ) + scheme.AddConversionFuncs( + Convert_versioned_Event_to_watch_Event, + Convert_versioned_InternalEvent_to_versioned_Event, + Convert_watch_Event_to_versioned_Event, + Convert_versioned_Event_to_versioned_InternalEvent, + ) + + // Register Unversioned types under their own special group + scheme.AddUnversionedTypes(Unversioned, + &Status{}, + &APIVersions{}, + &APIGroupList{}, + &APIGroup{}, + &APIResourceList{}, + ) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) + AddConversionFuncs(scheme) + RegisterDefaults(scheme) +} + +// scheme is the registry for the common types that adhere to the meta v1 API spec. +var scheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +func init() { + scheme.AddUnversionedTypes(SchemeGroupVersion, + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + ) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) + RegisterDefaults(scheme) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go new file mode 100644 index 000000000..a1e01f344 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -0,0 +1,180 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "encoding/json" + "time" + + "k8s.io/apimachinery/pkg/openapi" + + "github.com/go-openapi/spec" + "github.com/google/gofuzz" +) + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Time struct { + time.Time `protobuf:"-"` +} + +// DeepCopy returns a deep-copy of the Time value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t Time) DeepCopy() Time { + return t +} + +// String returns the representation of the time. +func (t Time) String() string { + return t.Time.String() +} + +// NewTime returns a wrapped instance of the provided time +func NewTime(time time.Time) Time { + return Time{time} +} + +// Date returns the Time corresponding to the supplied parameters +// by wrapping time.Date. +func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { + return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +// Now returns the current local time. +func Now() Time { + return Time{time.Now()} +} + +// IsZero returns true if the value is nil or time is zero. +func (t *Time) IsZero() bool { + if t == nil { + return true + } + return t.Time.IsZero() +} + +// Before reports whether the time instant t is before u. +func (t Time) Before(u Time) bool { + return t.Time.Before(u.Time) +} + +// Equal reports whether the time instant t is equal to u. +func (t Time) Equal(u Time) bool { + return t.Time.Equal(u.Time) +} + +// Unix returns the local time corresponding to the given Unix time +// by wrapping time.Unix. +func Unix(sec int64, nsec int64) Time { + return Time{time.Unix(sec, nsec)} +} + +// Rfc3339Copy returns a copy of the Time at second-level precision. +func (t Time) Rfc3339Copy() Time { + copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339)) + return Time{copied} +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (t *Time) UnmarshalJSON(b []byte) error { + if len(b) == 4 && string(b) == "null" { + t.Time = time.Time{} + return nil + } + + var str string + json.Unmarshal(b, &str) + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// UnmarshalQueryParameter converts from a URL query parameter value to an object +func (t *Time) UnmarshalQueryParameter(str string) error { + if len(str) == 0 { + t.Time = time.Time{} + return nil + } + // Tolerate requests from older clients that used JSON serialization to build query params + if len(str) == 4 && str == "null" { + t.Time = time.Time{} + return nil + } + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + if t.IsZero() { + // Encode unset/nil objects as JSON's "null". 
+ return []byte("null"), nil + } + + return json.Marshal(t.UTC().Format(time.RFC3339)) +} + +func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "date-time", + }, + }, + } +} + +// MarshalQueryParameter converts to a URL query parameter value +func (t Time) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(time.RFC3339), nil +} + +// Fuzz satisfies fuzz.Interface. +func (t *Time) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Leave off nanoseconds + // because JSON doesn't represent them so they can't round-trip + // properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) +} + +var _ fuzz.Interface = &Time{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go new file mode 100644 index 000000000..5520529dd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -0,0 +1,85 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" +) + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `json:"seconds" protobuf:"varint,1,opt,name=seconds"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + Nanos int32 `json:"nanos" protobuf:"varint,2,opt,name=nanos"` +} + +// Timestamp returns the Time as a new Timestamp value. +func (m *Time) ProtoTime() *Timestamp { + if m == nil { + return &Timestamp{} + } + return &Timestamp{ + Seconds: m.Time.Unix(), + Nanos: int32(m.Time.Nanosecond()), + } +} + +// Size implements the protobuf marshalling interface. +func (m *Time) Size() (n int) { + if m == nil || m.Time.IsZero() { + return 0 + } + return m.ProtoTime().Size() +} + +// Reset implements the protobuf marshalling interface. +func (m *Time) Unmarshal(data []byte) error { + if len(data) == 0 { + m.Time = time.Time{} + return nil + } + p := Timestamp{} + if err := p.Unmarshal(data); err != nil { + return err + } + m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + return nil +} + +// Marshal implements the protobuf marshalling interface. 
+func (m *Time) Marshal() (data []byte, err error) { + if m == nil || m.Time.IsZero() { + return nil, nil + } + return m.ProtoTime().Marshal() +} + +// MarshalTo implements the protobuf marshalling interface. +func (m *Time) MarshalTo(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalTo(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go new file mode 100644 index 000000000..c5ac31f79 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -0,0 +1,843 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package unversioned contains API types that are common to all versions. +// +// The package contains two categories of types: +// - external (serialized) types that lack their own version (e.g TypeMeta) +// - internal (never-serialized) types that are needed by several different +// api groups, and so live here, to avoid duplication and/or import loops +// (e.g. LabelSelector). +// In the future, we will probably move these categories of objects into +// separate packages. +package v1 + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/types" +) + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +type TypeMeta struct { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +type ListMeta struct { + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` +} + +// These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here +const ( + FinalizerOrphanDependents string = "orphan" + FinalizerDeleteDependents string = "foregroundDeletion" +) + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. 
May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. 
+ // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + // +patchStrategy=merge + Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` +} + +// Initializers tracks the progress of initialization. +type Initializers struct { + // Pending is a list of initializers that must execute in order before this object is visible. + // When the last pending initializer is removed, and no failing result is set, the initializers + // struct will be set to nil and the object is considered as initialized and visible to all + // clients. + Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending"` + // If result is set with the Failure field, the object will be persisted to storage and then deleted, + // ensuring that other clients can observe the deletion. + Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"` +} + +// Initializer is information about an initializer that has not yet completed. 
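For orientation, here is a minimal sketch of how a client might populate the ObjectMeta fields vendored above (GenerateName, Labels, Annotations, OwnerReferences). Only the metav1 types come from the vendored code; the label and annotation keys, the owner's kind/version, and the UID are illustrative values, not anything this package defines.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	isController := true

	meta := metav1.ObjectMeta{
		// GenerateName asks the server to append a unique suffix; Name is left empty.
		GenerateName: "demo-",
		Namespace:    "default",
		// Labels are selectable; Annotations are opaque metadata preserved on updates.
		Labels:      map[string]string{"app": "demo"},
		Annotations: map[string]string{"example.com/notes": "illustrative value"},
		// At most one owner reference may have Controller set to true.
		OwnerReferences: []metav1.OwnerReference{{
			APIVersion: "apps/v1beta1", // illustrative group/version
			Kind:       "Deployment",   // illustrative kind
			Name:       "demo-owner",
			UID:        "00000000-0000-0000-0000-000000000000", // illustrative UID
			Controller: &isController,
		}},
	}

	fmt.Printf("generateName=%q labels=%v owners=%d\n",
		meta.GenerateName, meta.Labels, len(meta.OwnerReferences))
}
```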
+type Initializer struct { + // name of the process that is responsible for initializing this object. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" +) + +// OwnerReference contains enough information to let you identify an owning +// object. Currently, an owning object must be in the same namespace, so there +// is no namespace field. +type OwnerReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // If true, this reference points to the managing controller. + // +optional + Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. + // +optional + BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` +} + +// ListOptions is the query options to a standard REST list call. +type ListOptions struct { + TypeMeta `json:",inline"` + + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"` + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. 
+ // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` + // Timeout for the list/watch call. + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` +} + +// ExportOptions is the query options to the standard REST get call. +type ExportOptions struct { + TypeMeta `json:",inline"` + // Should this value be exported. Export strips fields that a user can not specify. + Export bool `json:"export" protobuf:"varint,1,opt,name=export"` + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` +} + +// GetOptions is the standard query options to the standard REST get call. +type GetOptions struct { + TypeMeta `json:",inline"` + // When specified: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"` + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"` +} + +// DeletionPropagation decides if a deletion will propagate to the dependents of +// the object, and how the garbage collector will handle the propagation. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will + // delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector + // deletes all the dependents whose ownerReference.blockOwnerDeletion=true + // from the key-value store. API sever will put the "foregroundDeletion" + // finalizer on the object, and sets its deletionTimestamp. This policy is + // cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// DeleteOptions may be provided when deleting an API object. +type DeleteOptions struct { + TypeMeta `json:",inline"` + + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. 
+ // +optional + Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"` +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// Status is a return value for calls that don't return other objects. +type Status struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // A human-readable description of the status of this operation. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + // +optional + Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"` + // Extended data associated with the reason. Each reason may define its + // own extended details. This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + // +optional + Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` + // Suggested HTTP return code for this status, 0 if not set. + // +optional + Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"` +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under +// defined. +type StatusDetails struct { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). 
+ // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // The group attribute of the resource associated with the status StatusReason. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` + // UID of the resource. + // (when there is a single resource which can be described). + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. + // +optional + Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` + // If specified, the time in seconds before the operation should be retried. + // +optional + RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"` +} + +// Values of Status.Status +const ( + StatusSuccess = "Success" + StatusFailure = "Failure" +) + +// StatusReason is an enumeration of possible failure causes. Each StatusReason +// must map to a single HTTP status code, but multiple reasons may map +// to the same HTTP status code. +// TODO: move to apiserver +type StatusReason string + +const ( + // StatusReasonUnknown means the server has declined to indicate a specific reason. + // The details field may contain other information about this error. + // Status code 500. + StatusReasonUnknown StatusReason = "" + + // StatusReasonUnauthorized means the server can be reached and understood the request, but requires + // the user to present appropriate authorization credentials (identified by the WWW-Authenticate header) + // in order for the action to be completed. If the user has specified credentials on the request, the + // server considers them insufficient. + // Status code 401 + StatusReasonUnauthorized StatusReason = "Unauthorized" + + // StatusReasonForbidden means the server can be reached and understood the request, but refuses + // to take any further action. It is the result of the server being configured to deny access for some reason + // to the requested resource by the client. + // Details (optional): + // "kind" string - the kind attribute of the forbidden resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the forbidden resource + // Status code 403 + StatusReasonForbidden StatusReason = "Forbidden" + + // StatusReasonNotFound means one or more resources required for this operation + // could not be found. + // Details (optional): + // "kind" string - the kind attribute of the missing resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the missing resource + // Status code 404 + StatusReasonNotFound StatusReason = "NotFound" + + // StatusReasonAlreadyExists means the resource you are creating already exists. 
+ // Details (optional): + // "kind" string - the kind attribute of the conflicting resource + // "id" string - the identifier of the conflicting resource + // Status code 409 + StatusReasonAlreadyExists StatusReason = "AlreadyExists" + + // StatusReasonConflict means the requested operation cannot be completed + // due to a conflict in the operation. The client may need to alter the + // request. Each resource may define custom details that indicate the + // nature of the conflict. + // Status code 409 + StatusReasonConflict StatusReason = "Conflict" + + // StatusReasonGone means the item is no longer available at the server and no + // forwarding address is known. + // Status code 410 + StatusReasonGone StatusReason = "Gone" + + // StatusReasonInvalid means the requested create or update operation cannot be + // completed due to invalid data provided as part of the request. The client may + // need to alter the request. When set, the client may use the StatusDetails + // message field as a summary of the issues encountered. + // Details (optional): + // "kind" string - the kind attribute of the invalid resource + // "id" string - the identifier of the invalid resource + // "causes" - one or more StatusCause entries indicating the data in the + // provided resource that was invalid. The code, message, and + // field attributes will be set. + // Status code 422 + StatusReasonInvalid StatusReason = "Invalid" + + // StatusReasonServerTimeout means the server can be reached and understood the request, + // but cannot complete the action in a reasonable time. The client should retry the request. + // This is may be due to temporary server load or a transient communication issue with + // another server. Status code 500 is used because the HTTP spec provides no suitable + // server-requested client retry and the 5xx class represents actionable errors. + // Details (optional): + // "kind" string - the kind attribute of the resource being acted on. + // "id" string - the operation that is being attempted. + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 500 + StatusReasonServerTimeout StatusReason = "ServerTimeout" + + // StatusReasonTimeout means that the request could not be completed within the given time. + // Clients can get this response only when they specified a timeout param in the request, + // or if the server cannot complete the operation within a reasonable amount of time. + // The request might succeed with an increased value of timeout param. The client *should* + // wait at least the number of seconds specified by the retryAfterSeconds field. + // Details (optional): + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 504 + StatusReasonTimeout StatusReason = "Timeout" + + // StatusReasonBadRequest means that the request itself was invalid, because the request + // doesn't make any sense, for example deleting a read-only object. This is different than + // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the + // data was invalid. API calls that return BadRequest can never succeed. + StatusReasonBadRequest StatusReason = "BadRequest" + + // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the + // resource was not supported by the code - for instance, attempting to delete a resource that + // can only be created. API calls that return MethodNotAllowed can never succeed. 
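To make the DeleteOptions / DeletionPropagation / Preconditions semantics above concrete, here is a minimal sketch of assembling the body for a foreground cascading delete. It assumes only the vendored metav1 and types packages; the grace period and UID are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	policy := metav1.DeletePropagationForeground
	grace := int64(30)                                       // illustrative grace period
	uid := types.UID("00000000-0000-0000-0000-000000000000") // illustrative UID

	opts := metav1.DeleteOptions{
		// Wait up to 30 seconds for graceful termination.
		GracePeriodSeconds: &grace,
		// Foreground: the object keeps the "foregroundDeletion" finalizer until
		// dependents with blockOwnerDeletion=true are gone.
		PropagationPolicy: &policy,
		// Only delete if the live object still has this UID; otherwise 409 Conflict.
		Preconditions: &metav1.Preconditions{UID: &uid},
	}

	// Print the JSON a client would send as the DELETE request body.
	if err := json.NewEncoder(os.Stdout).Encode(&opts); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```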
+ StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" + + // StatusReasonInternalError indicates that an internal error occurred, it is unexpected + // and the outcome of the call is unknown. + // Details (optional): + // "causes" - The original error + // Status code 500 + StatusReasonInternalError StatusReason = "InternalError" + + // StatusReasonExpired indicates that the request is invalid because the content you are requesting + // has expired and is no longer available. It is typically associated with watches that can't be + // serviced. + // Status code 410 (gone) + StatusReasonExpired StatusReason = "Expired" + + // StatusReasonServiceUnavailable means that the request itself was valid, + // but the requested service is unavailable at this time. + // Retrying the request after some time might succeed. + // Status code 503 + StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable" +) + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered. +type StatusCause struct { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + // +optional + Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"` + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. + // + // Examples: + // "name" - the field "name" on the current resource + // "items[0].name" - the field "name" on the first array entry in "items" + // +optional + Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"` +} + +// CauseType is a machine readable value providing more detail about what +// occurred in a status response. An operation may have multiple causes for a +// status (whether Failure or Success). +type CauseType string + +const ( + // CauseTypeFieldValueNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). + CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound" + // CauseTypeFieldValueRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). + CauseTypeFieldValueRequired CauseType = "FieldValueRequired" + // CauseTypeFieldValueDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). + CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate" + // CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex + // match). + CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid" + // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules) + // values that can not be handled (e.g. an enumerated string). + CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported" + // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client + // without the expected return type. The presence of this cause indicates the error may be + // due to an intervening proxy or the server software malfunctioning. 
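The StatusReason constants above are meant to drive client retry behavior. The helper below is a sketch, not part of the vendored package: it treats the transient reasons as retryable and honors StatusDetails.RetryAfterSeconds, with an arbitrary 5-second fallback when the server gives no hint.

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// retryAfter reports whether a failed call described by st is worth retrying,
// and after how long, based on the StatusReason values defined above.
func retryAfter(st *metav1.Status) (bool, time.Duration) {
	switch st.Reason {
	case metav1.StatusReasonServerTimeout, metav1.StatusReasonTimeout, metav1.StatusReasonServiceUnavailable:
		delay := 5 * time.Second // fallback when no Retry-After hint is given
		if st.Details != nil && st.Details.RetryAfterSeconds > 0 {
			delay = time.Duration(st.Details.RetryAfterSeconds) * time.Second
		}
		return true, delay
	default:
		// BadRequest, MethodNotAllowed, Invalid, etc. will never succeed on retry.
		return false, 0
	}
}

func main() {
	st := &metav1.Status{
		Status:  metav1.StatusFailure,
		Reason:  metav1.StatusReasonServerTimeout,
		Details: &metav1.StatusDetails{RetryAfterSeconds: 10},
		Code:    500,
	}
	ok, d := retryAfter(st)
	fmt.Println(ok, d) // true 10s
}
```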
+ CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" +) + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type APIVersions struct { + TypeMeta `json:",inline"` + // versions are the api versions that are available. + Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` +} + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +type APIGroupList struct { + TypeMeta `json:",inline"` + // groups is a list of APIGroup. + Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` +} + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +type APIGroup struct { + TypeMeta `json:",inline"` + // name is the name of the group. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // versions are the versions supported in this group. + Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + // +optional + PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"` + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +type ServerAddressByClientCIDR struct { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. 
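The ServerAddressByClientCIDR entries carried in APIVersions and APIGroup are meant to be resolved on the client by picking the longest matching CIDR, as the comments above describe. A sketch of that selection using illustrative addresses; pickServerAddress is a hypothetical helper, not part of the vendored API.

```go
package main

import (
	"fmt"
	"net"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pickServerAddress applies the documented rule: among the CIDRs that contain
// the client IP, prefer the longest (most specific) prefix.
func pickServerAddress(entries []metav1.ServerAddressByClientCIDR, clientIP net.IP) (string, bool) {
	best, bestBits := "", -1
	for _, e := range entries {
		_, ipNet, err := net.ParseCIDR(e.ClientCIDR)
		if err != nil || !ipNet.Contains(clientIP) {
			continue
		}
		if ones, _ := ipNet.Mask.Size(); ones > bestBits {
			best, bestBits = e.ServerAddress, ones
		}
	}
	return best, bestBits >= 0
}

func main() {
	entries := []metav1.ServerAddressByClientCIDR{
		{ClientCIDR: "0.0.0.0/0", ServerAddress: "203.0.113.10:443"}, // public fallback
		{ClientCIDR: "10.0.0.0/8", ServerAddress: "10.0.0.1:443"},    // internal clients
	}
	addr, ok := pickServerAddress(entries, net.ParseIP("10.1.2.3"))
	fmt.Println(addr, ok) // 10.0.0.1:443 true
}
```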
+ ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` +} + +// GroupVersion contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +type GroupVersionForDiscovery struct { + // groupVersion specifies the API group and version in the form "group/version" + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` +} + +// APIResource specifies the name of a resource and whether it is namespaced. +type APIResource struct { + // name is the plural name of the resource. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // The singularName is more correct for reporting status on a single item and both singular and plural are allowed + // from the kubectl CLI interface. + SingularName string `json:"singularName" protobuf:"bytes,6,opt,name=singularName"` + // namespaced indicates if a resource is namespaced or not. + Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"` + // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') + Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` + // verbs is a list of supported kube verbs (this includes get, list, watch, create, + // update, patch, delete, deletecollection, and proxy) + Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` + // shortNames is a list of suggested short names of the resource. + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` + // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` +} + +// Verbs masks the value so protobuf can generate +// +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Verbs []string + +func (vs Verbs) String() string { + return fmt.Sprintf("%v", []string(vs)) +} + +// APIResourceList is a list of APIResource, it is used to expose the name of the +// resources supported in a specific group and version, and if the resource +// is namespaced. +type APIResourceList struct { + TypeMeta `json:",inline"` + // groupVersion is the group and version this APIResourceList is for. + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` + // resources contains the name of the resources and if they are namespaced. + APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +type RootPaths struct { + // paths are the paths available at root. + Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` +} + +// TODO: remove me when watch is refactored +func LabelSelectorQueryParam(version string) string { + return "labelSelector" +} + +// TODO: remove me when watch is refactored +func FieldSelectorQueryParam(version string) string { + return "fieldSelector" +} + +// String returns available api versions as a human-friendly version string. 
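APIResourceList and APIResource are what discovery clients consume to learn which resources and verbs a group/version serves. Below is a sketch with hand-written sample data, roughly the shape of a /api/v1 discovery response; supportsVerb is a hypothetical helper, not part of this package.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// supportsVerb reports whether the named resource in the list advertises verb.
func supportsVerb(list *metav1.APIResourceList, resource, verb string) bool {
	for _, r := range list.APIResources {
		if r.Name != resource {
			continue
		}
		for _, v := range r.Verbs {
			if v == verb {
				return true
			}
		}
	}
	return false
}

func main() {
	// Illustrative discovery data.
	list := &metav1.APIResourceList{
		GroupVersion: "v1",
		APIResources: []metav1.APIResource{
			{Name: "pods", SingularName: "pod", Namespaced: true, Kind: "Pod",
				Verbs: metav1.Verbs{"get", "list", "watch", "create", "delete"}},
		},
	}
	fmt.Println(supportsVerb(list, "pods", "watch")) // true
	fmt.Println(supportsVerb(list, "pods", "proxy")) // false
}
```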
+func (apiVersions APIVersions) String() string { + return strings.Join(apiVersions.Versions, ",") +} + +func (apiVersions APIVersions) GoString() string { + return apiVersions.String() +} + +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +type Patch struct{} + +// Note: +// There are two different styles of label selectors used in versioned types: +// an older style which is represented as just a string in versioned types, and a +// newer style that is structured. LabelSelector is an internal representation for the +// latter style. + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +type LabelSelector struct { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + // +optional + MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type LabelSelectorRequirement struct { + // key is the label key that the selector applies to. + // +patchMergeKey=key + // +patchStrategy=merge + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators ard In, NotIn, Exists and DoesNotExist. + Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A label selector operator is the set of operators that can be used in a selector requirement. +type LabelSelectorOperator string + +const ( + LabelSelectorOpIn LabelSelectorOperator = "In" + LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" + LabelSelectorOpExists LabelSelectorOperator = "Exists" + LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..159164d7c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -0,0 +1,315 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_APIGroup = map[string]string{ + "": "APIGroup contains the name, the supported versions, and the preferred version of a group.", + "name": "name is the name of the group.", + "versions": "versions are the versions supported in this group.", + "preferredVersion": "preferredVersion is the version preferred by the API server, which probably is the storage version.", + "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", +} + +func (APIGroup) SwaggerDoc() map[string]string { + return map_APIGroup +} + +var map_APIGroupList = map[string]string{ + "": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", + "groups": "groups is a list of APIGroup.", +} + +func (APIGroupList) SwaggerDoc() map[string]string { + return map_APIGroupList +} + +var map_APIResource = map[string]string{ + "": "APIResource specifies the name of a resource and whether it is namespaced.", + "name": "name is the plural name of the resource.", + "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "namespaced": "namespaced indicates if a resource is namespaced or not.", + "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "shortNames": "shortNames is a list of suggested short names of the resource.", + "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 
'all')", +} + +func (APIResource) SwaggerDoc() map[string]string { + return map_APIResource +} + +var map_APIResourceList = map[string]string{ + "": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", + "groupVersion": "groupVersion is the group and version this APIResourceList is for.", + "resources": "resources contains the name of the resources and if they are namespaced.", +} + +func (APIResourceList) SwaggerDoc() map[string]string { + return map_APIResourceList +} + +var map_APIVersions = map[string]string{ + "": "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", + "versions": "versions are the api versions that are available.", + "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", +} + +func (APIVersions) SwaggerDoc() map[string]string { + return map_APIVersions +} + +var map_DeleteOptions = map[string]string{ + "": "DeleteOptions may be provided when deleting an API object.", + "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", +} + +func (DeleteOptions) SwaggerDoc() map[string]string { + return map_DeleteOptions +} + +var map_ExportOptions = map[string]string{ + "": "ExportOptions is the query options to the standard REST get call.", + "export": "Should this value be exported. Export strips fields that a user can not specify.", + "exact": "Should the export be exact. 
Exact export maintains cluster-specific fields like 'Namespace'.", +} + +func (ExportOptions) SwaggerDoc() map[string]string { + return map_ExportOptions +} + +var map_GetOptions = map[string]string{ + "": "GetOptions is the standard query options to the standard REST get call.", + "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "includeUninitialized": "If true, partially initialized resources are included in the response.", +} + +func (GetOptions) SwaggerDoc() map[string]string { + return map_GetOptions +} + +var map_GroupVersionForDiscovery = map[string]string{ + "": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.", + "groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"", + "version": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.", +} + +func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { + return map_GroupVersionForDiscovery +} + +var map_Initializer = map[string]string{ + "": "Initializer is information about an initializer that has not yet completed.", + "name": "name of the process that is responsible for initializing this object.", +} + +func (Initializer) SwaggerDoc() map[string]string { + return map_Initializer +} + +var map_Initializers = map[string]string{ + "": "Initializers tracks the progress of initialization.", + "pending": "Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.", + "result": "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.", +} + +func (Initializers) SwaggerDoc() map[string]string { + return map_Initializers +} + +var map_LabelSelector = map[string]string{ + "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", + "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", +} + +func (LabelSelector) SwaggerDoc() map[string]string { + return map_LabelSelector +} + +var map_LabelSelectorRequirement = map[string]string{ + "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the label key that the selector applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", +} + +func (LabelSelectorRequirement) SwaggerDoc() map[string]string { + return map_LabelSelectorRequirement +} + +var map_ListMeta = map[string]string{ + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", +} + +func (ListMeta) SwaggerDoc() map[string]string { + return map_ListMeta +} + +var map_ListOptions = map[string]string{ + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "includeUninitialized": "If true, partially initialized resources are included in the response.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call.", +} + +func (ListOptions) SwaggerDoc() map[string]string { + return map_ListOptions +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. 
In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_OwnerReference = map[string]string{ + "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", + "apiVersion": "API version of the referent.", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "uid": "UID of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "controller": "If true, this reference points to the managing controller.", + "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", +} + +func (OwnerReference) SwaggerDoc() map[string]string { + return map_OwnerReference +} + +var map_Patch = map[string]string{ + "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", +} + +func (Patch) SwaggerDoc() map[string]string { + return map_Patch +} + +var map_Preconditions = map[string]string{ + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", +} + +func (Preconditions) SwaggerDoc() map[string]string { + return map_Preconditions +} + +var map_RootPaths = map[string]string{ + "": "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".", + "paths": "paths are the paths available at root.", +} + +func (RootPaths) SwaggerDoc() map[string]string { + return map_RootPaths +} + +var map_ServerAddressByClientCIDR = map[string]string{ + "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", + "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.", + "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.", +} + +func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { + return map_ServerAddressByClientCIDR +} + +var map_Status = map[string]string{ + "": "Status is a return value for calls that don't return other objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "message": "A human-readable description of the status of this operation.", + "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", + "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", + "code": "Suggested HTTP return code for this status, 0 if not set.", +} + +func (Status) SwaggerDoc() map[string]string { + return map_Status +} + +var map_StatusCause = map[string]string{ + "": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", + "reason": "A machine-readable description of the cause of the error. If this value is empty there is no information available.", + "message": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", + "field": "The field of the resource that has caused this error, as named by its JSON serialization. 
May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", +} + +func (StatusCause) SwaggerDoc() map[string]string { + return map_StatusCause +} + +var map_StatusDetails = map[string]string{ + "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", + "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", + "group": "The group attribute of the resource associated with the status StatusReason.", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", + "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried.", +} + +func (StatusDetails) SwaggerDoc() map[string]string { + return map_StatusDetails +} + +var map_TypeMeta = map[string]string{ + "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", +} + +func (TypeMeta) SwaggerDoc() map[string]string { + return map_TypeMeta +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go new file mode 100644 index 000000000..fed687e9b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -0,0 +1,752 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + "bytes" + gojson "encoding/json" + "errors" + "fmt" + "io" + "strings" + + "github.com/golang/glog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// Unstructured allows objects that do not have Golang structs registered to be manipulated +// generically. This can be used to deal with the API objects from a plug-in. Unstructured +// objects still have functioning TypeMeta features-- kind, version, etc. +// +// WARNING: This object has accessors for the v1 standard metadata. You *MUST NOT* use this +// type if you are dealing with objects that are not in the server meta v1 schema. +// +// TODO: make the serialization part of this type distinct from the field accessors. +type Unstructured struct { + // Object is a JSON compatible map with string, float, int, bool, []interface{}, or + // map[string]interface{} + // children. + Object map[string]interface{} +} + +var _ metav1.Object = &Unstructured{} +var _ runtime.Unstructured = &Unstructured{} +var _ runtime.Unstructured = &UnstructuredList{} + +func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } +func (obj *UnstructuredList) GetObjectKind() schema.ObjectKind { return obj } + +func (obj *Unstructured) IsUnstructuredObject() {} +func (obj *UnstructuredList) IsUnstructuredObject() {} + +func (obj *Unstructured) IsList() bool { + if obj.Object != nil { + _, ok := obj.Object["items"] + return ok + } + return false +} +func (obj *UnstructuredList) IsList() bool { return true } + +func (obj *Unstructured) UnstructuredContent() map[string]interface{} { + if obj.Object == nil { + obj.Object = make(map[string]interface{}) + } + return obj.Object +} + +// UnstructuredContent returns a map contain an overlay of the Items field onto +// the Object field. Items always overwrites overlay. Changing "items" in the +// returned object will affect items in the underlying Items field, but changing +// the "items" slice itself will have no effect. +// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows +// items to be changed. +func (obj *UnstructuredList) UnstructuredContent() map[string]interface{} { + out := obj.Object + if out == nil { + out = make(map[string]interface{}) + } + items := make([]interface{}, len(obj.Items)) + for i, item := range obj.Items { + items[i] = item.Object + } + out["items"] = items + return out +} + +// MarshalJSON ensures that the unstructured object produces proper +// JSON when passed to Go's standard JSON library. +func (u *Unstructured) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured object properly decodes +// JSON when passed to Go's standard JSON library. 
+func (u *Unstructured) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func getNestedField(obj map[string]interface{}, fields ...string) interface{} { + var val interface{} = obj + for _, field := range fields { + if _, ok := val.(map[string]interface{}); !ok { + return nil + } + val = val.(map[string]interface{})[field] + } + return val +} + +func getNestedString(obj map[string]interface{}, fields ...string) string { + if str, ok := getNestedField(obj, fields...).(string); ok { + return str + } + return "" +} + +func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { + if str, ok := getNestedField(obj, fields...).(int64); ok { + return str + } + return 0 +} + +func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { + if str, ok := getNestedField(obj, fields...).(*int64); ok { + return str + } + return nil +} + +func getNestedSlice(obj map[string]interface{}, fields ...string) []string { + if m, ok := getNestedField(obj, fields...).([]interface{}); ok { + strSlice := make([]string, 0, len(m)) + for _, v := range m { + if str, ok := v.(string); ok { + strSlice = append(strSlice, str) + } + } + return strSlice + } + return nil +} + +func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string { + if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok { + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } + } + return strMap + } + return nil +} + +func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) { + m := obj + if len(fields) > 1 { + for _, field := range fields[0 : len(fields)-1] { + if _, ok := m[field].(map[string]interface{}); !ok { + m[field] = make(map[string]interface{}) + } + m = m[field].(map[string]interface{}) + } + } + m[fields[len(fields)-1]] = value +} + +func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) { + m := make([]interface{}, 0, len(value)) + for _, v := range value { + m = append(m, v) + } + setNestedField(obj, m, fields...) +} + +func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) { + m := make(map[string]interface{}, len(value)) + for k, v := range value { + m[k] = v + } + setNestedField(obj, m, fields...) +} + +func (u *Unstructured) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedField(u.Object, value, fields...) +} + +func (u *Unstructured) setNestedSlice(value []string, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedSlice(u.Object, value, fields...) +} + +func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedMap(u.Object, value, fields...) +} + +func extractOwnerReference(src interface{}) metav1.OwnerReference { + v := src.(map[string]interface{}) + // though this field is a *bool, but when decoded from JSON, it's + // unmarshalled as bool. 
+ var controllerPtr *bool + controller, ok := (getNestedField(v, "controller")).(bool) + if !ok { + controllerPtr = nil + } else { + controllerCopy := controller + controllerPtr = &controllerCopy + } + var blockOwnerDeletionPtr *bool + blockOwnerDeletion, ok := (getNestedField(v, "blockOwnerDeletion")).(bool) + if !ok { + blockOwnerDeletionPtr = nil + } else { + blockOwnerDeletionCopy := blockOwnerDeletion + blockOwnerDeletionPtr = &blockOwnerDeletionCopy + } + return metav1.OwnerReference{ + Kind: getNestedString(v, "kind"), + Name: getNestedString(v, "name"), + APIVersion: getNestedString(v, "apiVersion"), + UID: (types.UID)(getNestedString(v, "uid")), + Controller: controllerPtr, + BlockOwnerDeletion: blockOwnerDeletionPtr, + } +} + +func setOwnerReference(src metav1.OwnerReference) map[string]interface{} { + ret := make(map[string]interface{}) + setNestedField(ret, src.Kind, "kind") + setNestedField(ret, src.Name, "name") + setNestedField(ret, src.APIVersion, "apiVersion") + setNestedField(ret, string(src.UID), "uid") + // json.Unmarshal() extracts boolean json fields as bool, not as *bool and hence extractOwnerReference() + // expects bool or a missing field, not *bool. So if pointer is nil, fields are omitted from the ret object. + // If pointer is non-nil, they are set to the referenced value. + if src.Controller != nil { + setNestedField(ret, *src.Controller, "controller") + } + if src.BlockOwnerDeletion != nil { + setNestedField(ret, *src.BlockOwnerDeletion, "blockOwnerDeletion") + } + return ret +} + +func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) { + field := getNestedField(object, "metadata", "ownerReferences") + if field == nil { + return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object) + } + ownerReferences, ok := field.([]map[string]interface{}) + if ok { + return ownerReferences, nil + } + // TODO: This is hacky... 
+ interfaces, ok := field.([]interface{}) + if !ok { + return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object) + } + ownerReferences = make([]map[string]interface{}, 0, len(interfaces)) + for i := 0; i < len(interfaces); i++ { + r, ok := interfaces[i].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object) + } + ownerReferences = append(ownerReferences, r) + } + return ownerReferences, nil +} + +func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { + original, err := getOwnerReferences(u.Object) + if err != nil { + glog.V(6).Info(err) + return nil + } + ret := make([]metav1.OwnerReference, 0, len(original)) + for i := 0; i < len(original); i++ { + ret = append(ret, extractOwnerReference(original[i])) + } + return ret +} + +func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) { + var newReferences = make([]map[string]interface{}, 0, len(references)) + for i := 0; i < len(references); i++ { + newReferences = append(newReferences, setOwnerReference(references[i])) + } + u.setNestedField(newReferences, "metadata", "ownerReferences") +} + +func (u *Unstructured) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *Unstructured) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *Unstructured) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *Unstructured) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *Unstructured) GetNamespace() string { + return getNestedString(u.Object, "metadata", "namespace") +} + +func (u *Unstructured) SetNamespace(namespace string) { + u.setNestedField(namespace, "metadata", "namespace") +} + +func (u *Unstructured) GetName() string { + return getNestedString(u.Object, "metadata", "name") +} + +func (u *Unstructured) SetName(name string) { + u.setNestedField(name, "metadata", "name") +} + +func (u *Unstructured) GetGenerateName() string { + return getNestedString(u.Object, "metadata", "generateName") +} + +func (u *Unstructured) SetGenerateName(name string) { + u.setNestedField(name, "metadata", "generateName") +} + +func (u *Unstructured) GetUID() types.UID { + return types.UID(getNestedString(u.Object, "metadata", "uid")) +} + +func (u *Unstructured) SetUID(uid types.UID) { + u.setNestedField(string(uid), "metadata", "uid") +} + +func (u *Unstructured) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *Unstructured) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *Unstructured) GetGeneration() int64 { + return getNestedInt64(u.Object, "metadata", "generation") +} + +func (u *Unstructured) SetGeneration(generation int64) { + u.setNestedField(generation, "metadata", "generation") +} + +func (u *Unstructured) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *Unstructured) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *Unstructured) GetCreationTimestamp() metav1.Time { + var timestamp metav1.Time + timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) + return timestamp +} + +func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) { + ts, _ := timestamp.MarshalQueryParameter() + u.setNestedField(ts, "metadata", 
"creationTimestamp") +} + +func (u *Unstructured) GetDeletionTimestamp() *metav1.Time { + var timestamp metav1.Time + timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp")) + if timestamp.IsZero() { + return nil + } + return ×tamp +} + +func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { + if timestamp == nil { + u.setNestedField(nil, "metadata", "deletionTimestamp") + return + } + ts, _ := timestamp.MarshalQueryParameter() + u.setNestedField(ts, "metadata", "deletionTimestamp") +} + +func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 { + return getNestedInt64Pointer(u.Object, "metadata", "deletionGracePeriodSeconds") +} + +func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { + u.setNestedField(deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds") +} + +func (u *Unstructured) GetLabels() map[string]string { + return getNestedMap(u.Object, "metadata", "labels") +} + +func (u *Unstructured) SetLabels(labels map[string]string) { + u.setNestedMap(labels, "metadata", "labels") +} + +func (u *Unstructured) GetAnnotations() map[string]string { + return getNestedMap(u.Object, "metadata", "annotations") +} + +func (u *Unstructured) SetAnnotations(annotations map[string]string) { + u.setNestedMap(annotations, "metadata", "annotations") +} + +func (u *Unstructured) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +var converter = unstructured.NewConverter(false) + +func (u *Unstructured) GetInitializers() *metav1.Initializers { + field := getNestedField(u.Object, "metadata", "initializers") + if field == nil { + return nil + } + obj, ok := field.(map[string]interface{}) + if !ok { + return nil + } + out := &metav1.Initializers{} + if err := converter.FromUnstructured(obj, out); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) + } + return out +} + +func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { + if initializers == nil { + setNestedField(u.Object, nil, "metadata", "initializers") + return + } + out := make(map[string]interface{}) + if err := converter.ToUnstructured(initializers, &out); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) + } + setNestedField(u.Object, out, "metadata", "initializers") +} + +func (u *Unstructured) GetFinalizers() []string { + return getNestedSlice(u.Object, "metadata", "finalizers") +} + +func (u *Unstructured) SetFinalizers(finalizers []string) { + u.setNestedSlice(finalizers, "metadata", "finalizers") +} + +func (u *Unstructured) GetClusterName() string { + return getNestedString(u.Object, "metadata", "clusterName") +} + +func (u *Unstructured) SetClusterName(clusterName string) { + u.setNestedField(clusterName, "metadata", "clusterName") +} + +// UnstructuredList allows lists that do not have Golang structs +// registered to be manipulated generically. This can be used to deal +// with the API lists from a plug-in. +type UnstructuredList struct { + Object map[string]interface{} + + // Items is a list of unstructured objects. 
+ Items []Unstructured `json:"items"` +} + +// MarshalJSON ensures that the unstructured list object produces proper +// JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured list object properly +// decodes JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedField(u.Object, value, fields...) +} + +func (u *UnstructuredList) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *UnstructuredList) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *UnstructuredList) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *UnstructuredList) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *UnstructuredList) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured +// type, which can be used for generic access to objects without a predefined scheme. +// TODO: move into serializer/json. +var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} + +type unstructuredJSONScheme struct{} + +func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + var err error + if obj != nil { + err = s.decodeInto(data, obj) + } else { + obj, err = s.decode(data) + } + + if err != nil { + return nil, nil, err + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, &gvk, runtime.NewMissingKindErr(string(data)) + } + + return obj, &gvk, nil +} + +func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { + switch t := obj.(type) { + case *Unstructured: + return json.NewEncoder(w).Encode(t.Object) + case *UnstructuredList: + items := make([]map[string]interface{}, 0, len(t.Items)) + for _, i := range t.Items { + items = append(items, i.Object) + } + t.Object["items"] = items + defer func() { delete(t.Object, "items") }() + return json.NewEncoder(w).Encode(t.Object) + case *runtime.Unknown: + // TODO: Unstructured needs to deal with ContentType. 
+ _, err := w.Write(t.Raw) + return err + default: + return json.NewEncoder(w).Encode(t) + } +} + +func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { + type detector struct { + Items gojson.RawMessage + } + var det detector + if err := json.Unmarshal(data, &det); err != nil { + return nil, err + } + + if det.Items != nil { + list := &UnstructuredList{} + err := s.decodeToList(data, list) + return list, err + } + + // No Items field, so it wasn't a list. + unstruct := &Unstructured{} + err := s.decodeToUnstructured(data, unstruct) + return unstruct, err +} + +func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { + switch x := obj.(type) { + case *Unstructured: + return s.decodeToUnstructured(data, x) + case *UnstructuredList: + return s.decodeToList(data, x) + case *runtime.VersionedObjects: + o, err := s.decode(data) + if err == nil { + x.Objects = []runtime.Object{o} + } + return err + default: + return json.Unmarshal(data, x) + } +} + +func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { + m := make(map[string]interface{}) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + unstruct.Object = m + + return nil +} + +func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { + type decodeList struct { + Items []gojson.RawMessage + } + + var dList decodeList + if err := json.Unmarshal(data, &dList); err != nil { + return err + } + + if err := json.Unmarshal(data, &list.Object); err != nil { + return err + } + + // For typed lists, e.g., a PodList, API server doesn't set each item's + // APIVersion and Kind. We need to set it. + listAPIVersion := list.GetAPIVersion() + listKind := list.GetKind() + itemKind := strings.TrimSuffix(listKind, "List") + + delete(list.Object, "items") + list.Items = nil + for _, i := range dList.Items { + unstruct := &Unstructured{} + if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { + return err + } + // This is hacky. Set the item's Kind and APIVersion to those inferred + // from the List. + if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { + unstruct.SetKind(itemKind) + unstruct.SetAPIVersion(listAPIVersion) + } + list.Items = append(list.Items, *unstruct) + } + return nil +} + +// UnstructuredObjectConverter is an ObjectConverter for use with +// Unstructured objects. Since it has no schema or type information, +// it will only succeed for no-op conversions. This is provided as a +// sane implementation for APIs that require an object converter. +type UnstructuredObjectConverter struct{} + +func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { + unstructIn, ok := in.(*Unstructured) + if !ok { + return fmt.Errorf("input type %T in not valid for unstructured conversion", in) + } + + unstructOut, ok := out.(*Unstructured) + if !ok { + return fmt.Errorf("output type %T in not valid for unstructured conversion", out) + } + + // maybe deep copy the map? It is documented in the + // ObjectConverter interface that this function is not + // guaranteeed to not mutate the input. Or maybe set the input + // object to nil. 
+ unstructOut.Object = unstructIn.Object + return nil +} + +func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { + if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { + gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) + if !ok { + // TODO: should this be a typed error? + return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) + } + in.GetObjectKind().SetGroupVersionKind(gvk) + } + return in, nil +} + +func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return "", "", errors.New("unstructured cannot convert field labels") +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go new file mode 100644 index 000000000..a645501a1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go @@ -0,0 +1,80 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +// Event represents a single event to a watched resource. +// +// +protobuf=true +type WatchEvent struct { + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *Status is recommended; other types may make sense + // depending on context. 
+ Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"` +} + +func Convert_watch_Event_to_versioned_Event(in *watch.Event, out *WatchEvent, s conversion.Scope) error { + out.Type = string(in.Type) + switch t := in.Object.(type) { + case *runtime.Unknown: + // TODO: handle other fields on Unknown and detect type + out.Object.Raw = t.Raw + case nil: + default: + out.Object.Object = in.Object + } + return nil +} + +func Convert_versioned_InternalEvent_to_versioned_Event(in *InternalEvent, out *WatchEvent, s conversion.Scope) error { + return Convert_watch_Event_to_versioned_Event((*watch.Event)(in), out, s) +} + +func Convert_versioned_Event_to_watch_Event(in *WatchEvent, out *watch.Event, s conversion.Scope) error { + out.Type = watch.EventType(in.Type) + if in.Object.Object != nil { + out.Object = in.Object.Object + } else if in.Object.Raw != nil { + // TODO: handle other fields on Unknown and detect type + out.Object = &runtime.Unknown{ + Raw: in.Object.Raw, + ContentType: runtime.ContentTypeJSON, + } + } + return nil +} + +func Convert_versioned_Event_to_versioned_InternalEvent(in *WatchEvent, out *InternalEvent, s conversion.Scope) error { + return Convert_versioned_Event_to_watch_Event(in, (*watch.Event)(out), s) +} + +// InternalEvent makes watch.Event versioned +// +protobuf=false +type InternalEvent watch.Event + +func (e *InternalEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (e *WatchEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..6fac96be4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -0,0 +1,646 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. 
+func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_v1_APIGroup, InType: reflect.TypeOf(&APIGroup{})}, + {Fn: DeepCopy_v1_APIGroupList, InType: reflect.TypeOf(&APIGroupList{})}, + {Fn: DeepCopy_v1_APIResource, InType: reflect.TypeOf(&APIResource{})}, + {Fn: DeepCopy_v1_APIResourceList, InType: reflect.TypeOf(&APIResourceList{})}, + {Fn: DeepCopy_v1_APIVersions, InType: reflect.TypeOf(&APIVersions{})}, + {Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, + {Fn: DeepCopy_v1_Duration, InType: reflect.TypeOf(&Duration{})}, + {Fn: DeepCopy_v1_ExportOptions, InType: reflect.TypeOf(&ExportOptions{})}, + {Fn: DeepCopy_v1_GetOptions, InType: reflect.TypeOf(&GetOptions{})}, + {Fn: DeepCopy_v1_GroupKind, InType: reflect.TypeOf(&GroupKind{})}, + {Fn: DeepCopy_v1_GroupResource, InType: reflect.TypeOf(&GroupResource{})}, + {Fn: DeepCopy_v1_GroupVersion, InType: reflect.TypeOf(&GroupVersion{})}, + {Fn: DeepCopy_v1_GroupVersionForDiscovery, InType: reflect.TypeOf(&GroupVersionForDiscovery{})}, + {Fn: DeepCopy_v1_GroupVersionKind, InType: reflect.TypeOf(&GroupVersionKind{})}, + {Fn: DeepCopy_v1_GroupVersionResource, InType: reflect.TypeOf(&GroupVersionResource{})}, + {Fn: DeepCopy_v1_Initializer, InType: reflect.TypeOf(&Initializer{})}, + {Fn: DeepCopy_v1_Initializers, InType: reflect.TypeOf(&Initializers{})}, + {Fn: DeepCopy_v1_InternalEvent, InType: reflect.TypeOf(&InternalEvent{})}, + {Fn: DeepCopy_v1_LabelSelector, InType: reflect.TypeOf(&LabelSelector{})}, + {Fn: DeepCopy_v1_LabelSelectorRequirement, InType: reflect.TypeOf(&LabelSelectorRequirement{})}, + {Fn: DeepCopy_v1_ListMeta, InType: reflect.TypeOf(&ListMeta{})}, + {Fn: DeepCopy_v1_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, + {Fn: DeepCopy_v1_MicroTime, InType: reflect.TypeOf(&MicroTime{})}, + {Fn: DeepCopy_v1_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, + {Fn: DeepCopy_v1_OwnerReference, InType: reflect.TypeOf(&OwnerReference{})}, + {Fn: DeepCopy_v1_Patch, InType: reflect.TypeOf(&Patch{})}, + {Fn: DeepCopy_v1_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, + {Fn: DeepCopy_v1_RootPaths, InType: reflect.TypeOf(&RootPaths{})}, + {Fn: DeepCopy_v1_ServerAddressByClientCIDR, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, + {Fn: DeepCopy_v1_Status, InType: reflect.TypeOf(&Status{})}, + {Fn: DeepCopy_v1_StatusCause, InType: reflect.TypeOf(&StatusCause{})}, + {Fn: DeepCopy_v1_StatusDetails, InType: reflect.TypeOf(&StatusDetails{})}, + {Fn: DeepCopy_v1_Time, InType: reflect.TypeOf(&Time{})}, + {Fn: DeepCopy_v1_Timestamp, InType: reflect.TypeOf(&Timestamp{})}, + {Fn: DeepCopy_v1_TypeMeta, InType: reflect.TypeOf(&TypeMeta{})}, + {Fn: DeepCopy_v1_WatchEvent, InType: reflect.TypeOf(&WatchEvent{})}, + } +} + +// DeepCopy_v1_APIGroup is an autogenerated deepcopy function. +func DeepCopy_v1_APIGroup(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIGroup) + out := out.(*APIGroup) + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]GroupVersionForDiscovery, len(*in)) + copy(*out, *in) + } + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_APIGroupList is an autogenerated deepcopy function. 
+func DeepCopy_v1_APIGroupList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIGroupList) + out := out.(*APIGroupList) + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]APIGroup, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*APIGroup) + } + } + } + return nil + } +} + +// DeepCopy_v1_APIResource is an autogenerated deepcopy function. +func DeepCopy_v1_APIResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIResource) + out := out.(*APIResource) + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make(Verbs, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_APIResourceList is an autogenerated deepcopy function. +func DeepCopy_v1_APIResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIResourceList) + out := out.(*APIResourceList) + *out = *in + if in.APIResources != nil { + in, out := &in.APIResources, &out.APIResources + *out = make([]APIResource, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*APIResource) + } + } + } + return nil + } +} + +// DeepCopy_v1_APIVersions is an autogenerated deepcopy function. +func DeepCopy_v1_APIVersions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIVersions) + out := out.(*APIVersions) + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_DeleteOptions is an autogenerated deepcopy function. +func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeleteOptions) + out := out.(*DeleteOptions) + *out = *in + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*Preconditions) + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_Duration is an autogenerated deepcopy function. +func DeepCopy_v1_Duration(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Duration) + out := out.(*Duration) + *out = *in + return nil + } +} + +// DeepCopy_v1_ExportOptions is an autogenerated deepcopy function. 
+func DeepCopy_v1_ExportOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExportOptions) + out := out.(*ExportOptions) + *out = *in + return nil + } +} + +// DeepCopy_v1_GetOptions is an autogenerated deepcopy function. +func DeepCopy_v1_GetOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GetOptions) + out := out.(*GetOptions) + *out = *in + return nil + } +} + +// DeepCopy_v1_GroupKind is an autogenerated deepcopy function. +func DeepCopy_v1_GroupKind(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupKind) + out := out.(*GroupKind) + *out = *in + return nil + } +} + +// DeepCopy_v1_GroupResource is an autogenerated deepcopy function. +func DeepCopy_v1_GroupResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupResource) + out := out.(*GroupResource) + *out = *in + return nil + } +} + +// DeepCopy_v1_GroupVersion is an autogenerated deepcopy function. +func DeepCopy_v1_GroupVersion(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersion) + out := out.(*GroupVersion) + *out = *in + return nil + } +} + +// DeepCopy_v1_GroupVersionForDiscovery is an autogenerated deepcopy function. +func DeepCopy_v1_GroupVersionForDiscovery(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersionForDiscovery) + out := out.(*GroupVersionForDiscovery) + *out = *in + return nil + } +} + +// DeepCopy_v1_GroupVersionKind is an autogenerated deepcopy function. +func DeepCopy_v1_GroupVersionKind(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersionKind) + out := out.(*GroupVersionKind) + *out = *in + return nil + } +} + +// DeepCopy_v1_GroupVersionResource is an autogenerated deepcopy function. +func DeepCopy_v1_GroupVersionResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersionResource) + out := out.(*GroupVersionResource) + *out = *in + return nil + } +} + +// DeepCopy_v1_Initializer is an autogenerated deepcopy function. +func DeepCopy_v1_Initializer(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Initializer) + out := out.(*Initializer) + *out = *in + return nil + } +} + +// DeepCopy_v1_Initializers is an autogenerated deepcopy function. +func DeepCopy_v1_Initializers(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Initializers) + out := out.(*Initializers) + *out = *in + if in.Pending != nil { + in, out := &in.Pending, &out.Pending + *out = make([]Initializer, len(*in)) + copy(*out, *in) + } + if in.Result != nil { + in, out := &in.Result, &out.Result + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*Status) + } + } + return nil + } +} + +// DeepCopy_v1_InternalEvent is an autogenerated deepcopy function. +func DeepCopy_v1_InternalEvent(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*InternalEvent) + out := out.(*InternalEvent) + *out = *in + // in.Object is kind 'Interface' + if in.Object != nil { + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*runtime.Object) + } + } + return nil + } +} + +// DeepCopy_v1_LabelSelector is an autogenerated deepcopy function. 
+func DeepCopy_v1_LabelSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LabelSelector) + out := out.(*LabelSelector) + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*LabelSelectorRequirement) + } + } + } + return nil + } +} + +// DeepCopy_v1_LabelSelectorRequirement is an autogenerated deepcopy function. +func DeepCopy_v1_LabelSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LabelSelectorRequirement) + out := out.(*LabelSelectorRequirement) + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_ListMeta is an autogenerated deepcopy function. +func DeepCopy_v1_ListMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListMeta) + out := out.(*ListMeta) + *out = *in + return nil + } +} + +// DeepCopy_v1_ListOptions is an autogenerated deepcopy function. +func DeepCopy_v1_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListOptions) + out := out.(*ListOptions) + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_MicroTime is an autogenerated deepcopy function. +func DeepCopy_v1_MicroTime(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MicroTime) + out := out.(*MicroTime) + *out = in.DeepCopy() + return nil + } +} + +// DeepCopy_v1_ObjectMeta is an autogenerated deepcopy function. 
+func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMeta) + out := out.(*ObjectMeta) + *out = *in + out.CreationTimestamp = in.CreationTimestamp.DeepCopy() + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(Time) + **out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*OwnerReference) + } + } + } + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*Initializers) + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_OwnerReference is an autogenerated deepcopy function. +func DeepCopy_v1_OwnerReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*OwnerReference) + out := out.(*OwnerReference) + *out = *in + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = new(bool) + **out = **in + } + if in.BlockOwnerDeletion != nil { + in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_Patch is an autogenerated deepcopy function. +func DeepCopy_v1_Patch(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Patch) + out := out.(*Patch) + *out = *in + return nil + } +} + +// DeepCopy_v1_Preconditions is an autogenerated deepcopy function. +func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Preconditions) + out := out.(*Preconditions) + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_RootPaths is an autogenerated deepcopy function. +func DeepCopy_v1_RootPaths(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RootPaths) + out := out.(*RootPaths) + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_ServerAddressByClientCIDR is an autogenerated deepcopy function. +func DeepCopy_v1_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServerAddressByClientCIDR) + out := out.(*ServerAddressByClientCIDR) + *out = *in + return nil + } +} + +// DeepCopy_v1_Status is an autogenerated deepcopy function. 
+func DeepCopy_v1_Status(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Status) + out := out.(*Status) + *out = *in + if in.Details != nil { + in, out := &in.Details, &out.Details + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*StatusDetails) + } + } + return nil + } +} + +// DeepCopy_v1_StatusCause is an autogenerated deepcopy function. +func DeepCopy_v1_StatusCause(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatusCause) + out := out.(*StatusCause) + *out = *in + return nil + } +} + +// DeepCopy_v1_StatusDetails is an autogenerated deepcopy function. +func DeepCopy_v1_StatusDetails(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatusDetails) + out := out.(*StatusDetails) + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = make([]StatusCause, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_Time is an autogenerated deepcopy function. +func DeepCopy_v1_Time(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Time) + out := out.(*Time) + *out = in.DeepCopy() + return nil + } +} + +// DeepCopy_v1_Timestamp is an autogenerated deepcopy function. +func DeepCopy_v1_Timestamp(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Timestamp) + out := out.(*Timestamp) + *out = *in + return nil + } +} + +// DeepCopy_v1_TypeMeta is an autogenerated deepcopy function. +func DeepCopy_v1_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TypeMeta) + out := out.(*TypeMeta) + *out = *in + return nil + } +} + +// DeepCopy_v1_WatchEvent is an autogenerated deepcopy function. +func DeepCopy_v1_WatchEvent(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WatchEvent) + out := out.(*WatchEvent) + *out = *in + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*runtime.RawExtension) + } + return nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go new file mode 100644 index 000000000..6df448eb9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/doc.go new file mode 100644 index 000000000..eea67c5c8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta.k8s.io +package v1alpha1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.pb.go new file mode 100644 index 000000000..2d43bf94f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.pb.go @@ -0,0 +1,633 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto + + It has these top-level messages: + PartialObjectMetadata + PartialObjectMetadataList + TableOptions +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } +func (*PartialObjectMetadata) ProtoMessage() {} +func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1alpha1.PartialObjectMetadata") + proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1alpha1.PartialObjectMetadataList") + proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1alpha1.TableOptions") +} +func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TableOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) + i += copy(dAtA[i:], m.IncludeObject) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PartialObjectMetadata) Size() (n int) { + var l int + _ = l + l = 
m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PartialObjectMetadataList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TableOptions) Size() (n int) { + var l int + _ = l + l = len(m.IncludeObject) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PartialObjectMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadata{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TableOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TableOptions{`, + `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 392 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xbf, 0x6e, 0xd4, 0x40, + 0x10, 0x87, 0xbd, 0x42, 0x91, 0x92, 0x0d, 0x69, 0x8c, 0x90, 0xc2, 0x15, 0xeb, 0xe8, 0xaa, 0x08, + 0xc1, 0x2e, 0x09, 0x08, 0xd1, 0xe2, 0x2e, 0x12, 0x28, 0x91, 0xa1, 0xa2, 0x62, 0x6d, 0x0f, 0xf6, + 0x62, 0x7b, 0xd7, 0xda, 0x1d, 0x47, 0xba, 0x0a, 0x1e, 0x81, 0xc7, 0xba, 0x32, 0x65, 0x2a, 0x8b, + 0x33, 0x6f, 0x41, 0x85, 0x6c, 0x5f, 0xc8, 0xbf, 0x3b, 0xe5, 0xba, 0x99, 0xdf, 0xe8, 0xfb, 0x3c, + 0xe3, 0xa5, 0x9f, 0x8a, 0x77, 0x8e, 0x2b, 0x23, 0x8a, 0x26, 0x06, 0xab, 0x01, 0xc1, 0x89, 0x73, + 0xd0, 0xa9, 0xb1, 0x62, 0x39, 0x90, 0xb5, 0xaa, 0x64, 0x92, 0x2b, 0x0d, 0x76, 0x26, 0xea, 0x22, + 0xeb, 0x03, 0x27, 0x2a, 0x40, 0x29, 0xce, 0x8f, 0x64, 0x59, 0xe7, 0xf2, 0x48, 0x64, 0xa0, 0xc1, + 0x4a, 0x84, 0x94, 0xd7, 0xd6, 0xa0, 0xf1, 0x9f, 0x8f, 0x2c, 0xbf, 0xc9, 0xf2, 0xba, 0xc8, 0xfa, + 0xc0, 0xf1, 0x9e, 0xe5, 0x57, 0xec, 0xe4, 0x65, 0xa6, 0x30, 0x6f, 0x62, 0x9e, 0x98, 0x4a, 0x64, + 0x26, 0x33, 0x62, 0x50, 0xc4, 0xcd, 0xb7, 0xa1, 0x1b, 0x9a, 0xa1, 0x1a, 0xd5, 0x93, 0x37, 0x9b, + 0xac, 0x75, 0x77, 0xa1, 0xc9, 0xda, 0x63, 0x6c, 0xa3, 0x51, 0x55, 0x70, 0x0f, 0x78, 0xfb, 0x10, + 0xe0, 0x92, 0x1c, 0x2a, 0x79, 0x8f, 0x7b, 0xbd, 0x8e, 0x6b, 0x50, 0x95, 0x42, 0x69, 0x74, 0x68, + 0xef, 0x42, 0xd3, 0x19, 0x7d, 0x7a, 0x26, 0x2d, 0x2a, 0x59, 0x9e, 0xc6, 0xdf, 0x21, 0xc1, 0x8f, + 0x80, 0x32, 0x95, 0x28, 0xfd, 0xaf, 0x74, 0xbb, 0x5a, 0xd6, 0xfb, 0xe4, 0x80, 0x1c, 0xee, 0x1e, + 0xbf, 0xe2, 0x9b, 0xfc, 0x5a, 0x7e, 0xed, 0x09, 0xfd, 0x79, 0x1b, 0x78, 0x5d, 0x1b, 0xd0, 0xeb, + 0x2c, 0xfa, 0x6f, 0x9d, 0xfe, 0xa0, 0xcf, 0x56, 0x7e, 0xfa, 0x83, 0x72, 0xe8, 0xc7, 0x74, 0x4b, + 0x21, 0x54, 0x6e, 0x9f, 0x1c, 
0x3c, 0x3a, 0xdc, 0x3d, 0x7e, 0xcf, 0x37, 0x7f, 0x56, 0xbe, 0xd2, + 0x1a, 0xee, 0x74, 0x6d, 0xb0, 0x75, 0xd2, 0x3b, 0xa3, 0x51, 0x3d, 0x8d, 0xe9, 0xe3, 0xcf, 0x32, + 0x2e, 0xe1, 0xb4, 0x46, 0x65, 0xb4, 0xf3, 0x23, 0xba, 0xa7, 0x74, 0x52, 0x36, 0x29, 0x8c, 0xe8, + 0x70, 0xf7, 0x4e, 0xf8, 0x62, 0x79, 0xc5, 0xde, 0xc9, 0xcd, 0xe1, 0xdf, 0x36, 0x78, 0x72, 0x2b, + 0x38, 0x33, 0xa5, 0x4a, 0x66, 0xd1, 0x6d, 0x45, 0xc8, 0xe7, 0x0b, 0xe6, 0x5d, 0x2c, 0x98, 0x77, + 0xb9, 0x60, 0xde, 0xcf, 0x8e, 0x91, 0x79, 0xc7, 0xc8, 0x45, 0xc7, 0xc8, 0x65, 0xc7, 0xc8, 0xef, + 0x8e, 0x91, 0x5f, 0x7f, 0x98, 0xf7, 0x65, 0xfb, 0x6a, 0xf7, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x97, 0x95, 0xbb, 0xf9, 0x14, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto new file mode 100644 index 000000000..06833dbd9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.proto @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.apis.meta.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +message PartialObjectMetadata { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +message PartialObjectMetadataList { + // items contains each of the included items. + repeated PartialObjectMetadata items = 1; +} + +// TableOptions are used when a Table is requested by the caller. +message TableOptions { + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1alpha1 of the meta.k8s.io API group. 
+ optional string includeObject = 1; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/register.go new file mode 100644 index 000000000..89f08f383 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// scheme is the registry for the common types that adhere to the meta v1alpha1 API spec. +var scheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1alpha1 API spec. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +func init() { + scheme.AddKnownTypes(SchemeGroupVersion, + &Table{}, + &TableOptions{}, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, + ) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types.go new file mode 100644 index 000000000..ef9921806 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types.go @@ -0,0 +1,157 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package v1alpha1 is alpha objects from meta that will be introduced. +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf +// generation to support a meta type that can accept any valid JSON. + +// Table is a tabular representation of a set of API resources. The server transforms the +// object into a set of preferred columns for quickly reviewing the objects. +// +protobuf=false +type Table struct { + v1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + v1.ListMeta `json:"metadata,omitempty"` + + // columnDefinitions describes each column in the returned items array. The number of cells per row + // will always match the number of column definitions. + ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` + // rows is the list of items in the table. + Rows []TableRow `json:"rows"` +} + +// TableColumnDefinition contains information about a column returned in the Table. +// +protobuf=false +type TableColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name"` + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string `json:"type"` + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string `json:"format"` + // description is a human readable description of this column. + Description string `json:"description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 `json:"priority"` +} + +// TableRow is an individual row in a table. +// +protobuf=false +type TableRow struct { + // cells will be as wide as headers and may contain strings, numbers, booleans, simple maps, or lists, or + // null. See the type field of the column definition for a more detailed description. + Cells []interface{} `json:"cells"` + // conditions describe additional status of a row that are relevant for a human user. + // +optional + Conditions []TableRowCondition `json:"conditions,omitempty"` + // This field contains the requested additional information about each object based on the includeObject + // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the + // default serialization of the object for the current API version, and if "Metadata" (the default) will + // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. + // +optional + Object runtime.RawExtension `json:"object,omitempty"` +} + +// TableRowCondition allows a row to be marked with additional information. +// +protobuf=false +type TableRowCondition struct { + // Type of row condition. + Type RowConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status"` + // (brief) machine readable reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +type RowConditionType string + +// These are valid conditions of a row. This list is not exhaustive and new conditions may be +// inculded by other resources. +const ( + // RowCompleted means the underlying resource has reached completion and may be given less + // visual priority than other resources. 
+ RowCompleted RowConditionType = "Completed" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// IncludeObjectPolicy controls which portion of the object is returned with a Table. +type IncludeObjectPolicy string + +const ( + // IncludeNone returns no object. + IncludeNone IncludeObjectPolicy = "None" + // IncludeMetadata serializes the object containing only its metadata field. + IncludeMetadata IncludeObjectPolicy = "Metadata" + // IncludeObject contains the full object. + IncludeObject IncludeObjectPolicy = "Object" +) + +// TableOptions are used when a Table is requested by the caller. +type TableOptions struct { + v1.TypeMeta `json:",inline"` + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1alpha1 of the meta.k8s.io API group. + IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` +} + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +type PartialObjectMetadata struct { + v1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +type PartialObjectMetadataList struct { + v1.TypeMeta `json:",inline"` + + // items contains each of the included items. + Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..e8bb62602 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,104 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_PartialObjectMetadata = map[string]string{ + "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", +} + +func (PartialObjectMetadata) SwaggerDoc() map[string]string { + return map_PartialObjectMetadata +} + +var map_PartialObjectMetadataList = map[string]string{ + "": "PartialObjectMetadataList contains a list of objects containing only their metadata", + "items": "items contains each of the included items.", +} + +func (PartialObjectMetadataList) SwaggerDoc() map[string]string { + return map_PartialObjectMetadataList +} + +var map_Table = map[string]string{ + "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", + "rows": "rows is the list of items in the table.", +} + +func (Table) SwaggerDoc() map[string]string { + return map_Table +} + +var map_TableColumnDefinition = map[string]string{ + "": "TableColumnDefinition contains information about a column returned in the Table.", + "name": "name is a human readable name for the column.", + "type": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "format": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "description": "description is a human readable description of this column.", + "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", +} + +func (TableColumnDefinition) SwaggerDoc() map[string]string { + return map_TableColumnDefinition +} + +var map_TableOptions = map[string]string{ + "": "TableOptions are used when a Table is requested by the caller.", + "includeObject": "includeObject decides whether to include each object along with its columnar information. 
Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1alpha1 of the meta.k8s.io API group.", +} + +func (TableOptions) SwaggerDoc() map[string]string { + return map_TableOptions +} + +var map_TableRow = map[string]string{ + "": "TableRow is an individual row in a table.", + "cells": "cells will be as wide as headers and may contain strings, numbers, booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", + "conditions": "conditions describe additional status of a row that are relevant for a human user.", + "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.", +} + +func (TableRow) SwaggerDoc() map[string]string { + return map_TableRow +} + +var map_TableRowCondition = map[string]string{ + "": "TableRowCondition allows a row to be marked with additional information.", + "type": "Type of row condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "reason": "(brief) machine readable reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (TableRowCondition) SwaggerDoc() map[string]string { + return map_TableRowCondition +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..ef8a117df --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,164 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. 
+func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_v1alpha1_PartialObjectMetadata, InType: reflect.TypeOf(&PartialObjectMetadata{})}, + {Fn: DeepCopy_v1alpha1_PartialObjectMetadataList, InType: reflect.TypeOf(&PartialObjectMetadataList{})}, + {Fn: DeepCopy_v1alpha1_Table, InType: reflect.TypeOf(&Table{})}, + {Fn: DeepCopy_v1alpha1_TableColumnDefinition, InType: reflect.TypeOf(&TableColumnDefinition{})}, + {Fn: DeepCopy_v1alpha1_TableOptions, InType: reflect.TypeOf(&TableOptions{})}, + {Fn: DeepCopy_v1alpha1_TableRow, InType: reflect.TypeOf(&TableRow{})}, + {Fn: DeepCopy_v1alpha1_TableRowCondition, InType: reflect.TypeOf(&TableRowCondition{})}, + } +} + +// DeepCopy_v1alpha1_PartialObjectMetadata is an autogenerated deepcopy function. +func DeepCopy_v1alpha1_PartialObjectMetadata(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PartialObjectMetadata) + out := out.(*PartialObjectMetadata) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + return nil + } +} + +// DeepCopy_v1alpha1_PartialObjectMetadataList is an autogenerated deepcopy function. +func DeepCopy_v1alpha1_PartialObjectMetadataList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PartialObjectMetadataList) + out := out.(*PartialObjectMetadataList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*PartialObjectMetadata, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(**PartialObjectMetadata) + } + } + } + return nil + } +} + +// DeepCopy_v1alpha1_Table is an autogenerated deepcopy function. +func DeepCopy_v1alpha1_Table(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Table) + out := out.(*Table) + *out = *in + if in.ColumnDefinitions != nil { + in, out := &in.ColumnDefinitions, &out.ColumnDefinitions + *out = make([]TableColumnDefinition, len(*in)) + copy(*out, *in) + } + if in.Rows != nil { + in, out := &in.Rows, &out.Rows + *out = make([]TableRow, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*TableRow) + } + } + } + return nil + } +} + +// DeepCopy_v1alpha1_TableColumnDefinition is an autogenerated deepcopy function. +func DeepCopy_v1alpha1_TableColumnDefinition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TableColumnDefinition) + out := out.(*TableColumnDefinition) + *out = *in + return nil + } +} + +// DeepCopy_v1alpha1_TableOptions is an autogenerated deepcopy function. +func DeepCopy_v1alpha1_TableOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TableOptions) + out := out.(*TableOptions) + *out = *in + return nil + } +} + +// DeepCopy_v1alpha1_TableRow is an autogenerated deepcopy function. 
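+// Illustrative call with made-up values: the in/out arguments are untyped, but both
+// must be pointers to TableRow for the type assertions below to succeed.
+//
+// in := &TableRow{Cells: []interface{}{"pod-a", int64(3)}}
+// out := &TableRow{}
+// if err := DeepCopy_v1alpha1_TableRow(in, out, conversion.NewCloner()); err != nil {
+//     panic(err)
+// }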
+func DeepCopy_v1alpha1_TableRow(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TableRow) + out := out.(*TableRow) + *out = *in + if in.Cells != nil { + in, out := &in.Cells, &out.Cells + *out = make([]interface{}, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*interface{}) + } + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TableRowCondition, len(*in)) + copy(*out, *in) + } + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*runtime.RawExtension) + } + return nil + } +} + +// DeepCopy_v1alpha1_TableRowCondition is an autogenerated deepcopy function. +func DeepCopy_v1alpha1_TableRowCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TableRowCondition) + out := out.(*TableRowCondition) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000..7e6df29d4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go b/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go new file mode 100644 index 000000000..c5dec1f31 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go @@ -0,0 +1,249 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "fmt" + "reflect" +) + +// Cloner knows how to copy one type to another. +type Cloner struct { + // Map from the type to a function which can do the deep copy. 
+ deepCopyFuncs map[reflect.Type]reflect.Value + generatedDeepCopyFuncs map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error +} + +// NewCloner creates a new Cloner object. +func NewCloner() *Cloner { + c := &Cloner{ + deepCopyFuncs: map[reflect.Type]reflect.Value{}, + generatedDeepCopyFuncs: map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error{}, + } + if err := c.RegisterDeepCopyFunc(byteSliceDeepCopy); err != nil { + // If one of the deep-copy functions is malformed, detect it immediately. + panic(err) + } + return c +} + +// Prevent recursing into every byte... +func byteSliceDeepCopy(in *[]byte, out *[]byte, c *Cloner) error { + if *in != nil { + *out = make([]byte, len(*in)) + copy(*out, *in) + } else { + *out = nil + } + return nil +} + +// Verifies whether a deep-copy function has a correct signature. +func verifyDeepCopyFunctionSignature(ft reflect.Type) error { + if ft.Kind() != reflect.Func { + return fmt.Errorf("expected func, got: %v", ft) + } + if ft.NumIn() != 3 { + return fmt.Errorf("expected three 'in' params, got %v", ft) + } + if ft.NumOut() != 1 { + return fmt.Errorf("expected one 'out' param, got %v", ft) + } + if ft.In(0).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) + } + if ft.In(1) != ft.In(0) { + return fmt.Errorf("expected 'in' param 0 the same as param 1, got: %v", ft) + } + var forClonerType Cloner + if expected := reflect.TypeOf(&forClonerType); ft.In(2) != expected { + return fmt.Errorf("expected '%v' arg for 'in' param 2, got: '%v'", expected, ft.In(2)) + } + var forErrorType error + // This convolution is necessary, otherwise TypeOf picks up on the fact + // that forErrorType is nil + errorType := reflect.TypeOf(&forErrorType).Elem() + if ft.Out(0) != errorType { + return fmt.Errorf("expected error return, got: %v", ft) + } + return nil +} + +// RegisterGeneratedDeepCopyFunc registers a copying func with the Cloner. +// deepCopyFunc must take three parameters: a type input, a pointer to a +// type output, and a pointer to Cloner. It should return an error. +// +// Example: +// c.RegisterGeneratedDeepCopyFunc( +// func(in Pod, out *Pod, c *Cloner) error { +// // deep copy logic... +// return nil +// }) +func (c *Cloner) RegisterDeepCopyFunc(deepCopyFunc interface{}) error { + fv := reflect.ValueOf(deepCopyFunc) + ft := fv.Type() + if err := verifyDeepCopyFunctionSignature(ft); err != nil { + return err + } + c.deepCopyFuncs[ft.In(0)] = fv + return nil +} + +// GeneratedDeepCopyFunc bundles an untyped generated deep-copy function of a type +// with a reflection type object used as a key to lookup the deep-copy function. +type GeneratedDeepCopyFunc struct { + Fn func(in interface{}, out interface{}, c *Cloner) error + InType reflect.Type +} + +// Similar to RegisterDeepCopyFunc, but registers deep copy function that were +// automatically generated. +func (c *Cloner) RegisterGeneratedDeepCopyFunc(fn GeneratedDeepCopyFunc) error { + c.generatedDeepCopyFuncs[fn.InType] = fn.Fn + return nil +} + +// DeepCopy will perform a deep copy of a given object. +func (c *Cloner) DeepCopy(in interface{}) (interface{}, error) { + // Can be invalid if we run DeepCopy(X) where X is a nil interface type. + // For example, we get an invalid value when someone tries to deep-copy + // a nil labels.Selector. + // This does not occur if X is nil and is a pointer to a concrete type. 
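+ // Illustrative call pattern: the caller type-asserts the returned copy back to the
+ // concrete type, e.g. copied, err := c.DeepCopy(original); pod := copied.(*Pod)
+ // (Pod being a stand-in type, as in the registration example above).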
+ if in == nil { + return nil, nil + } + inValue := reflect.ValueOf(in) + outValue, err := c.deepCopy(inValue) + if err != nil { + return nil, err + } + return outValue.Interface(), nil +} + +func (c *Cloner) deepCopy(src reflect.Value) (reflect.Value, error) { + inType := src.Type() + + switch src.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + if src.IsNil() { + return src, nil + } + } + + if fv, ok := c.deepCopyFuncs[inType]; ok { + return c.customDeepCopy(src, fv) + } + if fv, ok := c.generatedDeepCopyFuncs[inType]; ok { + var outValue reflect.Value + outValue = reflect.New(inType.Elem()) + err := fv(src.Interface(), outValue.Interface(), c) + return outValue, err + } + return c.defaultDeepCopy(src) +} + +func (c *Cloner) customDeepCopy(src, fv reflect.Value) (reflect.Value, error) { + outValue := reflect.New(src.Type().Elem()) + args := []reflect.Value{src, outValue, reflect.ValueOf(c)} + result := fv.Call(args)[0].Interface() + // This convolution is necessary because nil interfaces won't convert + // to error. + if result == nil { + return outValue, nil + } + return outValue, result.(error) +} + +func (c *Cloner) defaultDeepCopy(src reflect.Value) (reflect.Value, error) { + switch src.Kind() { + case reflect.Chan, reflect.Func, reflect.UnsafePointer, reflect.Uintptr: + return src, fmt.Errorf("cannot deep copy kind: %s", src.Kind()) + case reflect.Array: + dst := reflect.New(src.Type()) + for i := 0; i < src.Len(); i++ { + copyVal, err := c.deepCopy(src.Index(i)) + if err != nil { + return src, err + } + dst.Elem().Index(i).Set(copyVal) + } + return dst.Elem(), nil + case reflect.Interface: + if src.IsNil() { + return src, nil + } + return c.deepCopy(src.Elem()) + case reflect.Map: + if src.IsNil() { + return src, nil + } + dst := reflect.MakeMap(src.Type()) + for _, k := range src.MapKeys() { + copyVal, err := c.deepCopy(src.MapIndex(k)) + if err != nil { + return src, err + } + dst.SetMapIndex(k, copyVal) + } + return dst, nil + case reflect.Ptr: + if src.IsNil() { + return src, nil + } + dst := reflect.New(src.Type().Elem()) + copyVal, err := c.deepCopy(src.Elem()) + if err != nil { + return src, err + } + dst.Elem().Set(copyVal) + return dst, nil + case reflect.Slice: + if src.IsNil() { + return src, nil + } + dst := reflect.MakeSlice(src.Type(), 0, src.Len()) + for i := 0; i < src.Len(); i++ { + copyVal, err := c.deepCopy(src.Index(i)) + if err != nil { + return src, err + } + dst = reflect.Append(dst, copyVal) + } + return dst, nil + case reflect.Struct: + dst := reflect.New(src.Type()) + for i := 0; i < src.NumField(); i++ { + if !dst.Elem().Field(i).CanSet() { + // Can't set private fields. At this point, the + // best we can do is a shallow copy. For + // example, time.Time is a value type with + // private members that can be shallow copied. + return src, nil + } + copyVal, err := c.deepCopy(src.Field(i)) + if err != nil { + return src, err + } + dst.Elem().Field(i).Set(copyVal) + } + return dst.Elem(), nil + + default: + // Value types like numbers, booleans, and strings. + return src, nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go new file mode 100644 index 000000000..7854c207c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -0,0 +1,898 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "fmt" + "reflect" +) + +type typePair struct { + source reflect.Type + dest reflect.Type +} + +type typeNamePair struct { + fieldType reflect.Type + fieldName string +} + +// DebugLogger allows you to get debugging messages if necessary. +type DebugLogger interface { + Logf(format string, args ...interface{}) +} + +type NameFunc func(t reflect.Type) string + +var DefaultNameFunc = func(t reflect.Type) string { return t.Name() } + +type GenericConversionFunc func(a, b interface{}, scope Scope) (bool, error) + +// Converter knows how to convert one type to another. +type Converter struct { + // Map from the conversion pair to a function which can + // do the conversion. + conversionFuncs ConversionFuncs + generatedConversionFuncs ConversionFuncs + + // genericConversions are called during normal conversion to offer a "fast-path" + // that avoids all reflection. These methods are not called outside of the .Convert() + // method. + genericConversions []GenericConversionFunc + + // Set of conversions that should be treated as a no-op + ignoredConversions map[typePair]struct{} + + // This is a map from a source field type and name, to a list of destination + // field type and name. + structFieldDests map[typeNamePair][]typeNamePair + + // Allows for the opposite lookup of structFieldDests. So that SourceFromDest + // copy flag also works. So this is a map of destination field name, to potential + // source field name and type to look for. + structFieldSources map[typeNamePair][]typeNamePair + + // Map from an input type to a function which can apply a key name mapping + inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc + + // Map from an input type to a set of default conversion flags. + inputDefaultFlags map[reflect.Type]FieldMatchingFlags + + // If non-nil, will be called to print helpful debugging info. Quite verbose. + Debug DebugLogger + + // nameFunc is called to retrieve the name of a type; this name is used for the + // purpose of deciding whether two types match or not (i.e., will we attempt to + // do a conversion). The default returns the go type name. + nameFunc func(t reflect.Type) string +} + +// NewConverter creates a new Converter object. +func NewConverter(nameFn NameFunc) *Converter { + c := &Converter{ + conversionFuncs: NewConversionFuncs(), + generatedConversionFuncs: NewConversionFuncs(), + ignoredConversions: make(map[typePair]struct{}), + nameFunc: nameFn, + structFieldDests: make(map[typeNamePair][]typeNamePair), + structFieldSources: make(map[typeNamePair][]typeNamePair), + + inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc), + inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags), + } + c.RegisterConversionFunc(Convert_Slice_byte_To_Slice_byte) + return c +} + +// AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern +// (for two conversion types) to the converter. These functions are checked first during +// a normal conversion, but are otherwise not called. Use AddConversionFuncs when registering +// typed conversions. 
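+//
+// A minimal sketch of such a fast-path func (Foo is a stand-in type): returning false
+// means "not handled here", and conversion falls back to the reflective path.
+//
+// c.AddGenericConversionFunc(func(a, b interface{}, s Scope) (bool, error) {
+//     in, inOK := a.(*Foo)
+//     out, outOK := b.(*Foo)
+//     if !inOK || !outOK {
+//         return false, nil
+//     }
+//     *out = *in
+//     return true, nil
+// })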
+func (c *Converter) AddGenericConversionFunc(fn GenericConversionFunc) { + c.genericConversions = append(c.genericConversions, fn) +} + +// WithConversions returns a Converter that is a copy of c but with the additional +// fns merged on top. +func (c *Converter) WithConversions(fns ConversionFuncs) *Converter { + copied := *c + copied.conversionFuncs = c.conversionFuncs.Merge(fns) + return &copied +} + +// DefaultMeta returns the conversion FieldMappingFunc and meta for a given type. +func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) { + return c.inputDefaultFlags[t], &Meta{ + KeyNameMapping: c.inputFieldMappingFuncs[t], + } +} + +// Convert_Slice_byte_To_Slice_byte prevents recursing into every byte +func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error { + if *in == nil { + *out = nil + return nil + } + *out = make([]byte, len(*in)) + copy(*out, *in) + return nil +} + +// Scope is passed to conversion funcs to allow them to continue an ongoing conversion. +// If multiple converters exist in the system, Scope will allow you to use the correct one +// from a conversion function--that is, the one your conversion function was called by. +type Scope interface { + // Call Convert to convert sub-objects. Note that if you call it with your own exact + // parameters, you'll run out of stack space before anything useful happens. + Convert(src, dest interface{}, flags FieldMatchingFlags) error + + // DefaultConvert performs the default conversion, without calling a conversion func + // on the current stack frame. This makes it safe to call from a conversion func. + DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error + + // SrcTags and DestTags contain the struct tags that src and dest had, respectively. + // If the enclosing object was not a struct, then these will contain no tags, of course. + SrcTag() reflect.StructTag + DestTag() reflect.StructTag + + // Flags returns the flags with which the conversion was started. + Flags() FieldMatchingFlags + + // Meta returns any information originally passed to Convert. + Meta() *Meta +} + +// FieldMappingFunc can convert an input field value into different values, depending on +// the value of the source or destination struct tags. +type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string) + +func NewConversionFuncs() ConversionFuncs { + return ConversionFuncs{fns: make(map[typePair]reflect.Value)} +} + +type ConversionFuncs struct { + fns map[typePair]reflect.Value +} + +// Add adds the provided conversion functions to the lookup table - they must have the signature +// `func(type1, type2, Scope) error`. Functions are added in the order passed and will override +// previously registered pairs. +func (c ConversionFuncs) Add(fns ...interface{}) error { + for _, fn := range fns { + fv := reflect.ValueOf(fn) + ft := fv.Type() + if err := verifyConversionFunctionSignature(ft); err != nil { + return err + } + c.fns[typePair{ft.In(0).Elem(), ft.In(1).Elem()}] = fv + } + return nil +} + +// Merge returns a new ConversionFuncs that contains all conversions from +// both other and c, with other conversions taking precedence. +func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs { + merged := NewConversionFuncs() + for k, v := range c.fns { + merged.fns[k] = v + } + for k, v := range other.fns { + merged.fns[k] = v + } + return merged +} + +// Meta is supplied by Scheme, when it calls Convert. 
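+// As an illustration (MyOptions, myOpts, src and dst are stand-ins), a caller can
+// thread extra data through Context and read it back inside a conversion func via
+// its Scope:
+//
+// err := c.Convert(&src, &dst, SourceToDest|IgnoreMissingFields, &Meta{Context: myOpts})
+// // ...and inside a registered func: got := s.Meta().Context.(MyOptions)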
+type Meta struct { + // KeyNameMapping is an optional function which may map the listed key (field name) + // into a source and destination value. + KeyNameMapping FieldMappingFunc + // Context is an optional field that callers may use to pass info to conversion functions. + Context interface{} +} + +// scope contains information about an ongoing conversion. +type scope struct { + converter *Converter + meta *Meta + flags FieldMatchingFlags + + // srcStack & destStack are separate because they may not have a 1:1 + // relationship. + srcStack scopeStack + destStack scopeStack +} + +type scopeStackElem struct { + tag reflect.StructTag + value reflect.Value + key string +} + +type scopeStack []scopeStackElem + +func (s *scopeStack) pop() { + n := len(*s) + *s = (*s)[:n-1] +} + +func (s *scopeStack) push(e scopeStackElem) { + *s = append(*s, e) +} + +func (s *scopeStack) top() *scopeStackElem { + return &(*s)[len(*s)-1] +} + +func (s scopeStack) describe() string { + desc := "" + if len(s) > 1 { + desc = "(" + s[1].value.Type().String() + ")" + } + for i, v := range s { + if i < 2 { + // First layer on stack is not real; second is handled specially above. + continue + } + if v.key == "" { + desc += fmt.Sprintf(".%v", v.value.Type()) + } else { + desc += fmt.Sprintf(".%v", v.key) + } + } + return desc +} + +// Formats src & dest as indices for printing. +func (s *scope) setIndices(src, dest int) { + s.srcStack.top().key = fmt.Sprintf("[%v]", src) + s.destStack.top().key = fmt.Sprintf("[%v]", dest) +} + +// Formats src & dest as map keys for printing. +func (s *scope) setKeys(src, dest interface{}) { + s.srcStack.top().key = fmt.Sprintf(`["%v"]`, src) + s.destStack.top().key = fmt.Sprintf(`["%v"]`, dest) +} + +// Convert continues a conversion. +func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error { + return s.converter.Convert(src, dest, flags, s.meta) +} + +// DefaultConvert continues a conversion, performing a default conversion (no conversion func) +// for the current stack frame. +func (s *scope) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error { + return s.converter.DefaultConvert(src, dest, flags, s.meta) +} + +// SrcTag returns the tag of the struct containing the current source item, if any. +func (s *scope) SrcTag() reflect.StructTag { + return s.srcStack.top().tag +} + +// DestTag returns the tag of the struct containing the current dest item, if any. +func (s *scope) DestTag() reflect.StructTag { + return s.destStack.top().tag +} + +// Flags returns the flags with which the current conversion was started. +func (s *scope) Flags() FieldMatchingFlags { + return s.flags +} + +// Meta returns the meta object that was originally passed to Convert. +func (s *scope) Meta() *Meta { + return s.meta +} + +// describe prints the path to get to the current (source, dest) values. +func (s *scope) describe() (src, dest string) { + return s.srcStack.describe(), s.destStack.describe() +} + +// error makes an error that includes information about where we were in the objects +// we were asked to convert. +func (s *scope) errorf(message string, args ...interface{}) error { + srcPath, destPath := s.describe() + where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath) + return fmt.Errorf(where+message, args...) +} + +// Verifies whether a conversion function has a correct signature. 
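+// For example (Foo and Bar are stand-ins), `func(in *Foo, out *Bar, s Scope) error`
+// passes these checks, while a non-pointer first argument such as
+// `func(in Foo, out *Bar, s Scope) error` is rejected.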
+func verifyConversionFunctionSignature(ft reflect.Type) error { + if ft.Kind() != reflect.Func { + return fmt.Errorf("expected func, got: %v", ft) + } + if ft.NumIn() != 3 { + return fmt.Errorf("expected three 'in' params, got: %v", ft) + } + if ft.NumOut() != 1 { + return fmt.Errorf("expected one 'out' param, got: %v", ft) + } + if ft.In(0).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) + } + if ft.In(1).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft) + } + scopeType := Scope(nil) + if e, a := reflect.TypeOf(&scopeType).Elem(), ft.In(2); e != a { + return fmt.Errorf("expected '%v' arg for 'in' param 2, got '%v' (%v)", e, a, ft) + } + var forErrorType error + // This convolution is necessary, otherwise TypeOf picks up on the fact + // that forErrorType is nil. + errorType := reflect.TypeOf(&forErrorType).Elem() + if ft.Out(0) != errorType { + return fmt.Errorf("expected error return, got: %v", ft) + } + return nil +} + +// RegisterConversionFunc registers a conversion func with the +// Converter. conversionFunc must take three parameters: a pointer to the input +// type, a pointer to the output type, and a conversion.Scope (which should be +// used if recursive conversion calls are desired). It must return an error. +// +// Example: +// c.RegisterConversionFunc( +// func(in *Pod, out *v1.Pod, s Scope) error { +// // conversion logic... +// return nil +// }) +func (c *Converter) RegisterConversionFunc(conversionFunc interface{}) error { + return c.conversionFuncs.Add(conversionFunc) +} + +// Similar to RegisterConversionFunc, but registers conversion function that were +// automatically generated. +func (c *Converter) RegisterGeneratedConversionFunc(conversionFunc interface{}) error { + return c.generatedConversionFuncs.Add(conversionFunc) +} + +// RegisterIgnoredConversion registers a "no-op" for conversion, where any requested +// conversion between from and to is ignored. +func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { + typeFrom := reflect.TypeOf(from) + typeTo := reflect.TypeOf(to) + if reflect.TypeOf(from).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'from' param 0, got: %v", typeFrom) + } + if typeTo.Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo) + } + c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{} + return nil +} + +// IsConversionIgnored returns true if the specified objects should be dropped during +// conversion. +func (c *Converter) IsConversionIgnored(inType, outType reflect.Type) bool { + _, found := c.ignoredConversions[typePair{inType, outType}] + return found +} + +func (c *Converter) HasConversionFunc(inType, outType reflect.Type) bool { + _, found := c.conversionFuncs.fns[typePair{inType, outType}] + return found +} + +func (c *Converter) ConversionFuncValue(inType, outType reflect.Type) (reflect.Value, bool) { + value, found := c.conversionFuncs.fns[typePair{inType, outType}] + return value, found +} + +// SetStructFieldCopy registers a correspondence. Whenever a struct field is encountered +// which has a type and name matching srcFieldType and srcFieldName, it wil be copied +// into the field in the destination struct matching destFieldType & Name, if such a +// field exists. +// May be called multiple times, even for the same source field & type--all applicable +// copies will be performed. 
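+// For example (field names are made up), to copy any string field named "OldName"
+// into a string field named "NewName" on the destination struct:
+//
+// c.SetStructFieldCopy("", "OldName", "", "NewName")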
+func (c *Converter) SetStructFieldCopy(srcFieldType interface{}, srcFieldName string, destFieldType interface{}, destFieldName string) error { + st := reflect.TypeOf(srcFieldType) + dt := reflect.TypeOf(destFieldType) + srcKey := typeNamePair{st, srcFieldName} + destKey := typeNamePair{dt, destFieldName} + c.structFieldDests[srcKey] = append(c.structFieldDests[srcKey], destKey) + c.structFieldSources[destKey] = append(c.structFieldSources[destKey], srcKey) + return nil +} + +// RegisterInputDefaults registers a field name mapping function, used when converting +// from maps to structs. Inputs to the conversion methods are checked for this type and a mapping +// applied automatically if the input matches in. A set of default flags for the input conversion +// may also be provided, which will be used when no explicit flags are requested. +func (c *Converter) RegisterInputDefaults(in interface{}, fn FieldMappingFunc, defaultFlags FieldMatchingFlags) error { + fv := reflect.ValueOf(in) + ft := fv.Type() + if ft.Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer 'in' argument, got: %v", ft) + } + c.inputFieldMappingFuncs[ft] = fn + c.inputDefaultFlags[ft] = defaultFlags + return nil +} + +// FieldMatchingFlags contains a list of ways in which struct fields could be +// copied. These constants may be | combined. +type FieldMatchingFlags int + +const ( + // Loop through destination fields, search for matching source + // field to copy it from. Source fields with no corresponding + // destination field will be ignored. If SourceToDest is + // specified, this flag is ignored. If neither is specified, + // or no flags are passed, this flag is the default. + DestFromSource FieldMatchingFlags = 0 + // Loop through source fields, search for matching dest field + // to copy it into. Destination fields with no corresponding + // source field will be ignored. + SourceToDest FieldMatchingFlags = 1 << iota + // Don't treat it as an error if the corresponding source or + // dest field can't be found. + IgnoreMissingFields + // Don't require type names to match. + AllowDifferentFieldTypeNames +) + +// IsSet returns true if the given flag or combination of flags is set. +func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool { + if flag == DestFromSource { + // The bit logic doesn't work on the default value. + return f&SourceToDest != SourceToDest + } + return f&flag == flag +} + +// Convert will translate src to dest if it knows how. Both must be pointers. +// If no conversion func is registered and the default copying mechanism +// doesn't work on this type pair, an error will be returned. +// Read the comments on the various FieldMatchingFlags constants to understand +// what the 'flags' parameter does. +// 'meta' is given to allow you to pass information to conversion functions, +// it is not used by Convert() other than storing it in the scope. +// Not safe for objects with cyclic references! +func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { + if len(c.genericConversions) > 0 { + // TODO: avoid scope allocation + s := &scope{converter: c, flags: flags, meta: meta} + for _, fn := range c.genericConversions { + if ok, err := fn(src, dest, s); ok { + return err + } + } + } + return c.doConversion(src, dest, flags, meta, c.convert) +} + +// DefaultConvert will translate src to dest if it knows how. Both must be pointers. +// No conversion func is used. 
If the default copying mechanism +// doesn't work on this type pair, an error will be returned. +// Read the comments on the various FieldMatchingFlags constants to understand +// what the 'flags' parameter does. +// 'meta' is given to allow you to pass information to conversion functions, +// it is not used by DefaultConvert() other than storing it in the scope. +// Not safe for objects with cyclic references! +func (c *Converter) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { + return c.doConversion(src, dest, flags, meta, c.defaultConvert) +} + +type conversionFunc func(sv, dv reflect.Value, scope *scope) error + +func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error { + dv, err := EnforcePtr(dest) + if err != nil { + return err + } + if !dv.CanAddr() && !dv.CanSet() { + return fmt.Errorf("can't write to dest") + } + sv, err := EnforcePtr(src) + if err != nil { + return err + } + s := &scope{ + converter: c, + flags: flags, + meta: meta, + } + // Leave something on the stack, so that calls to struct tag getters never fail. + s.srcStack.push(scopeStackElem{}) + s.destStack.push(scopeStackElem{}) + return f(sv, dv, s) +} + +// callCustom calls 'custom' with sv & dv. custom must be a conversion function. +func (c *Converter) callCustom(sv, dv, custom reflect.Value, scope *scope) error { + if !sv.CanAddr() { + sv2 := reflect.New(sv.Type()) + sv2.Elem().Set(sv) + sv = sv2 + } else { + sv = sv.Addr() + } + if !dv.CanAddr() { + if !dv.CanSet() { + return scope.errorf("can't addr or set dest.") + } + dvOrig := dv + dv := reflect.New(dvOrig.Type()) + defer func() { dvOrig.Set(dv) }() + } else { + dv = dv.Addr() + } + args := []reflect.Value{sv, dv, reflect.ValueOf(scope)} + ret := custom.Call(args)[0].Interface() + // This convolution is necessary because nil interfaces won't convert + // to errors. + if ret == nil { + return nil + } + return ret.(error) +} + +// convert recursively copies sv into dv, calling an appropriate conversion function if +// one is registered. +func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error { + dt, st := dv.Type(), sv.Type() + pair := typePair{st, dt} + + // ignore conversions of this type + if _, ok := c.ignoredConversions[pair]; ok { + if c.Debug != nil { + c.Debug.Logf("Ignoring conversion of '%v' to '%v'", st, dt) + } + return nil + } + + // Convert sv to dv. + if fv, ok := c.conversionFuncs.fns[pair]; ok { + if c.Debug != nil { + c.Debug.Logf("Calling custom conversion of '%v' to '%v'", st, dt) + } + return c.callCustom(sv, dv, fv, scope) + } + if fv, ok := c.generatedConversionFuncs.fns[pair]; ok { + if c.Debug != nil { + c.Debug.Logf("Calling generated conversion of '%v' to '%v'", st, dt) + } + return c.callCustom(sv, dv, fv, scope) + } + + return c.defaultConvert(sv, dv, scope) +} + +// defaultConvert recursively copies sv into dv. no conversion function is called +// for the current stack frame (but conversion functions may be called for nested objects) +func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error { + dt, st := dv.Type(), sv.Type() + + if !dv.CanSet() { + return scope.errorf("Cannot set dest. 
(Tried to deep copy something with unexported fields?)") + } + + if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) { + return scope.errorf( + "type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.", + c.nameFunc(st), c.nameFunc(dt), st, dt) + } + + switch st.Kind() { + case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: + // Don't copy these via assignment/conversion! + default: + // This should handle all simple types. + if st.AssignableTo(dt) { + dv.Set(sv) + return nil + } + if st.ConvertibleTo(dt) { + dv.Set(sv.Convert(dt)) + return nil + } + } + + if c.Debug != nil { + c.Debug.Logf("Trying to convert '%v' to '%v'", st, dt) + } + + scope.srcStack.push(scopeStackElem{value: sv}) + scope.destStack.push(scopeStackElem{value: dv}) + defer scope.srcStack.pop() + defer scope.destStack.pop() + + switch dv.Kind() { + case reflect.Struct: + return c.convertKV(toKVValue(sv), toKVValue(dv), scope) + case reflect.Slice: + if sv.IsNil() { + // Don't make a zero-length slice. + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) + for i := 0; i < sv.Len(); i++ { + scope.setIndices(i, i) + if err := c.convert(sv.Index(i), dv.Index(i), scope); err != nil { + return err + } + } + case reflect.Ptr: + if sv.IsNil() { + // Don't copy a nil ptr! + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.New(dt.Elem())) + switch st.Kind() { + case reflect.Ptr, reflect.Interface: + return c.convert(sv.Elem(), dv.Elem(), scope) + default: + return c.convert(sv, dv.Elem(), scope) + } + case reflect.Map: + if sv.IsNil() { + // Don't copy a nil ptr! + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeMap(dt)) + for _, sk := range sv.MapKeys() { + dk := reflect.New(dt.Key()).Elem() + if err := c.convert(sk, dk, scope); err != nil { + return err + } + dkv := reflect.New(dt.Elem()).Elem() + scope.setKeys(sk.Interface(), dk.Interface()) + // TODO: sv.MapIndex(sk) may return a value with CanAddr() == false, + // because a map[string]struct{} does not allow a pointer reference. + // Calling a custom conversion function defined for the map value + // will panic. Example is PodInfo map[string]ContainerStatus. + if err := c.convert(sv.MapIndex(sk), dkv, scope); err != nil { + return err + } + dv.SetMapIndex(dk, dkv) + } + case reflect.Interface: + if sv.IsNil() { + // Don't copy a nil interface! + dv.Set(reflect.Zero(dt)) + return nil + } + tmpdv := reflect.New(sv.Elem().Type()).Elem() + if err := c.convert(sv.Elem(), tmpdv, scope); err != nil { + return err + } + dv.Set(reflect.ValueOf(tmpdv.Interface())) + return nil + default: + return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt) + } + return nil +} + +var stringType = reflect.TypeOf("") + +func toKVValue(v reflect.Value) kvValue { + switch v.Kind() { + case reflect.Struct: + return structAdaptor(v) + case reflect.Map: + if v.Type().Key().AssignableTo(stringType) { + return stringMapAdaptor(v) + } + } + + return nil +} + +// kvValue lets us write the same conversion logic to work with both maps +// and structs. Only maps with string keys make sense for this. +type kvValue interface { + // returns all keys, as a []string. + keys() []string + // Will just return "" for maps. + tagOf(key string) reflect.StructTag + // Will return the zero Value if the key doesn't exist. + value(key string) reflect.Value + // Maps require explicit setting-- will do nothing for structs. 
+ // Returns false on failure. + confirmSet(key string, v reflect.Value) bool +} + +type stringMapAdaptor reflect.Value + +func (a stringMapAdaptor) len() int { + return reflect.Value(a).Len() +} + +func (a stringMapAdaptor) keys() []string { + v := reflect.Value(a) + keys := make([]string, v.Len()) + for i, v := range v.MapKeys() { + if v.IsNil() { + continue + } + switch t := v.Interface().(type) { + case string: + keys[i] = t + } + } + return keys +} + +func (a stringMapAdaptor) tagOf(key string) reflect.StructTag { + return "" +} + +func (a stringMapAdaptor) value(key string) reflect.Value { + return reflect.Value(a).MapIndex(reflect.ValueOf(key)) +} + +func (a stringMapAdaptor) confirmSet(key string, v reflect.Value) bool { + return true +} + +type structAdaptor reflect.Value + +func (a structAdaptor) len() int { + v := reflect.Value(a) + return v.Type().NumField() +} + +func (a structAdaptor) keys() []string { + v := reflect.Value(a) + t := v.Type() + keys := make([]string, t.NumField()) + for i := range keys { + keys[i] = t.Field(i).Name + } + return keys +} + +func (a structAdaptor) tagOf(key string) reflect.StructTag { + v := reflect.Value(a) + field, ok := v.Type().FieldByName(key) + if ok { + return field.Tag + } + return "" +} + +func (a structAdaptor) value(key string) reflect.Value { + v := reflect.Value(a) + return v.FieldByName(key) +} + +func (a structAdaptor) confirmSet(key string, v reflect.Value) bool { + return true +} + +// convertKV can convert things that consist of key/value pairs, like structs +// and some maps. +func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error { + if skv == nil || dkv == nil { + // TODO: add keys to stack to support really understandable error messages. + return fmt.Errorf("Unable to convert %#v to %#v", skv, dkv) + } + + lister := dkv + if scope.flags.IsSet(SourceToDest) { + lister = skv + } + + var mapping FieldMappingFunc + if scope.meta != nil && scope.meta.KeyNameMapping != nil { + mapping = scope.meta.KeyNameMapping + } + + for _, key := range lister.keys() { + if found, err := c.checkField(key, skv, dkv, scope); found { + if err != nil { + return err + } + continue + } + stag := skv.tagOf(key) + dtag := dkv.tagOf(key) + skey := key + dkey := key + if mapping != nil { + skey, dkey = scope.meta.KeyNameMapping(key, stag, dtag) + } + + df := dkv.value(dkey) + sf := skv.value(skey) + if !df.IsValid() || !sf.IsValid() { + switch { + case scope.flags.IsSet(IgnoreMissingFields): + // No error. + case scope.flags.IsSet(SourceToDest): + return scope.errorf("%v not present in dest", dkey) + default: + return scope.errorf("%v not present in src", skey) + } + continue + } + scope.srcStack.top().key = skey + scope.srcStack.top().tag = stag + scope.destStack.top().key = dkey + scope.destStack.top().tag = dtag + if err := c.convert(sf, df, scope); err != nil { + return err + } + } + return nil +} + +// checkField returns true if the field name matches any of the struct +// field copying rules. The error should be ignored if it returns false. +func (c *Converter) checkField(fieldName string, skv, dkv kvValue, scope *scope) (bool, error) { + replacementMade := false + if scope.flags.IsSet(DestFromSource) { + df := dkv.value(fieldName) + if !df.IsValid() { + return false, nil + } + destKey := typeNamePair{df.Type(), fieldName} + // Check each of the potential source (type, name) pairs to see if they're + // present in sv. 
+ for _, potentialSourceKey := range c.structFieldSources[destKey] { + sf := skv.value(potentialSourceKey.fieldName) + if !sf.IsValid() { + continue + } + if sf.Type() == potentialSourceKey.fieldType { + // Both the source's name and type matched, so copy. + scope.srcStack.top().key = potentialSourceKey.fieldName + scope.destStack.top().key = fieldName + if err := c.convert(sf, df, scope); err != nil { + return true, err + } + dkv.confirmSet(fieldName, df) + replacementMade = true + } + } + return replacementMade, nil + } + + sf := skv.value(fieldName) + if !sf.IsValid() { + return false, nil + } + srcKey := typeNamePair{sf.Type(), fieldName} + // Check each of the potential dest (type, name) pairs to see if they're + // present in dv. + for _, potentialDestKey := range c.structFieldDests[srcKey] { + df := dkv.value(potentialDestKey.fieldName) + if !df.IsValid() { + continue + } + if df.Type() == potentialDestKey.fieldType { + // Both the dest's name and type matched, so copy. + scope.srcStack.top().key = fieldName + scope.destStack.top().key = potentialDestKey.fieldName + if err := c.convert(sf, df, scope); err != nil { + return true, err + } + dkv.confirmSet(potentialDestKey.fieldName, df) + replacementMade = true + } + } + return replacementMade, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go new file mode 100644 index 000000000..f21abe1e5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go @@ -0,0 +1,36 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "k8s.io/apimachinery/third_party/forked/golang/reflect" +) + +// The code for this type must be located in third_party, since it forks from +// go std lib. But for convenience, we expose the type here, too. +type Equalities struct { + reflect.Equalities +} + +// For convenience, panics on errors +func EqualitiesOrDie(funcs ...interface{}) Equalities { + e := Equalities{reflect.Equalities{}} + if err := e.AddFuncs(funcs...); err != nil { + panic(err) + } + return e +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go new file mode 100644 index 000000000..7415d8164 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package conversion provides go object versioning. 
+// +// Specifically, conversion provides a way for you to define multiple versions +// of the same object. You may write functions which implement conversion logic, +// but for the fields which did not change, copying is automated. This makes it +// easy to modify the structures you use in memory without affecting the format +// you store on disk or respond to in your external API calls. +package conversion // import "k8s.io/apimachinery/pkg/conversion" diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/helper.go b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go new file mode 100644 index 000000000..4ebc1ebc5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go @@ -0,0 +1,39 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "fmt" + "reflect" +) + +// EnforcePtr ensures that obj is a pointer of some sort. Returns a reflect.Value +// of the dereferenced pointer, ensuring that it is settable/addressable. +// Returns an error if this is not possible. +func EnforcePtr(obj interface{}) (reflect.Value, error) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + if v.Kind() == reflect.Invalid { + return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind") + } + return reflect.Value{}, fmt.Errorf("expected pointer, but got %v type", v.Type()) + } + if v.IsNil() { + return reflect.Value{}, fmt.Errorf("expected pointer, but got nil") + } + return v.Elem(), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go new file mode 100644 index 000000000..30f717b2c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -0,0 +1,188 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queryparams + +import ( + "fmt" + "net/url" + "reflect" + "strings" +) + +// Marshaler converts an object to a query parameter string representation +type Marshaler interface { + MarshalQueryParameter() (string, error) +} + +// Unmarshaler converts a string representation to an object +type Unmarshaler interface { + UnmarshalQueryParameter(string) error +} + +func jsonTag(field reflect.StructField) (string, bool) { + structTag := field.Tag.Get("json") + if len(structTag) == 0 { + return "", false + } + parts := strings.Split(structTag, ",") + tag := parts[0] + if tag == "-" { + tag = "" + } + omitempty := false + parts = parts[1:] + for _, part := range parts { + if part == "omitempty" { + omitempty = true + break + } + } + return tag, omitempty +} + +func formatValue(value interface{}) string { + return fmt.Sprintf("%v", value) +} + +func isPointerKind(kind reflect.Kind) bool { + return kind == reflect.Ptr +} + +func isStructKind(kind reflect.Kind) bool { + return kind == reflect.Struct +} + +func isValueKind(kind reflect.Kind) bool { + switch kind { + case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, + reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, + reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, + reflect.Float64, reflect.Complex64, reflect.Complex128: + return true + default: + return false + } +} + +func zeroValue(value reflect.Value) bool { + return reflect.DeepEqual(reflect.Zero(value.Type()).Interface(), value.Interface()) +} + +func customMarshalValue(value reflect.Value) (reflect.Value, bool) { + // Return unless we implement a custom query marshaler + if !value.CanInterface() { + return reflect.Value{}, false + } + + marshaler, ok := value.Interface().(Marshaler) + if !ok { + return reflect.Value{}, false + } + + // Don't invoke functions on nil pointers + // If the type implements MarshalQueryParameter, AND the tag is not omitempty, AND the value is a nil pointer, "" seems like a reasonable response + if isPointerKind(value.Kind()) && zeroValue(value) { + return reflect.ValueOf(""), true + } + + // Get the custom marshalled value + v, err := marshaler.MarshalQueryParameter() + if err != nil { + return reflect.Value{}, false + } + return reflect.ValueOf(v), true +} + +func addParam(values url.Values, tag string, omitempty bool, value reflect.Value) { + if omitempty && zeroValue(value) { + return + } + val := "" + iValue := fmt.Sprintf("%v", value.Interface()) + + if iValue != "" { + val = iValue + } + values.Add(tag, val) +} + +func addListOfParams(values url.Values, tag string, omitempty bool, list reflect.Value) { + for i := 0; i < list.Len(); i++ { + addParam(values, tag, omitempty, list.Index(i)) + } +} + +// Convert takes an object and converts it to a url.Values object using JSON tags as +// parameter names. Only top-level simple values, arrays, and slices are serialized. +// Embedded structs, maps, etc. will not be serialized. 
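Illustrative aside (not part of the vendored file): Convert walks the json tags of a struct and emits url.Values, so a hypothetical options type would flatten like this.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion/queryparams"
)

// ListOptions is a made-up type; only exported, json-tagged, top-level simple
// values and slices of simple values are serialized.
type ListOptions struct {
	LabelSelector string   `json:"labelSelector,omitempty"`
	Limit         int64    `json:"limit,omitempty"`
	Kinds         []string `json:"kinds,omitempty"`
	internal      string   // no json tag, so Convert skips it
}

func main() {
	opts := &ListOptions{
		LabelSelector: "app=web",
		Limit:         10,
		Kinds:         []string{"pods", "services"},
	}
	values, err := queryparams.Convert(opts)
	if err != nil {
		panic(err)
	}
	// Encode sorts keys: kinds=pods&kinds=services&labelSelector=app%3Dweb&limit=10
	fmt.Println(values.Encode())
}
```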
+func Convert(obj interface{}) (url.Values, error) { + result := url.Values{} + if obj == nil { + return result, nil + } + var sv reflect.Value + switch reflect.TypeOf(obj).Kind() { + case reflect.Ptr, reflect.Interface: + sv = reflect.ValueOf(obj).Elem() + default: + return nil, fmt.Errorf("expecting a pointer or interface") + } + st := sv.Type() + if !isStructKind(st.Kind()) { + return nil, fmt.Errorf("expecting a pointer to a struct") + } + + // Check all object fields + convertStruct(result, st, sv) + + return result, nil +} + +func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) { + for i := 0; i < st.NumField(); i++ { + field := sv.Field(i) + tag, omitempty := jsonTag(st.Field(i)) + if len(tag) == 0 { + continue + } + ft := field.Type() + + kind := ft.Kind() + if isPointerKind(kind) { + ft = ft.Elem() + kind = ft.Kind() + if !field.IsNil() { + field = reflect.Indirect(field) + } + } + + switch { + case isValueKind(kind): + addParam(result, tag, omitempty, field) + case kind == reflect.Array || kind == reflect.Slice: + if isValueKind(ft.Elem().Kind()) { + addListOfParams(result, tag, omitempty, field) + } + case isStructKind(kind) && !(zeroValue(field) && omitempty): + if marshalValue, ok := customMarshalValue(field); ok { + addParam(result, tag, omitempty, marshalValue) + } else { + convertStruct(result, ft, field) + } + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go new file mode 100644 index 000000000..7b763de6f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package queryparams provides conversion from versioned +// runtime objects to URL query values +package queryparams // import "k8s.io/apimachinery/pkg/conversion/queryparams" diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go new file mode 100644 index 000000000..cf84a6198 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go @@ -0,0 +1,690 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unstructured + +import ( + "bytes" + encodingjson "encoding/json" + "fmt" + "math" + "os" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + "github.com/golang/glog" +) + +// Converter is an interface for converting between interface{} +// and map[string]interface representation. +type Converter interface { + ToUnstructured(obj interface{}, u *map[string]interface{}) error + FromUnstructured(u map[string]interface{}, obj interface{}) error +} + +type structField struct { + structType reflect.Type + field int +} + +type fieldInfo struct { + name string + nameValue reflect.Value + omitempty bool +} + +type fieldsCacheMap map[structField]*fieldInfo + +type fieldsCache struct { + sync.Mutex + value atomic.Value +} + +func newFieldsCache() *fieldsCache { + cache := &fieldsCache{} + cache.value.Store(make(fieldsCacheMap)) + return cache +} + +var ( + marshalerType = reflect.TypeOf(new(encodingjson.Marshaler)).Elem() + unmarshalerType = reflect.TypeOf(new(encodingjson.Unmarshaler)).Elem() + mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) + stringType = reflect.TypeOf(string("")) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) + boolType = reflect.TypeOf(bool(false)) + fieldCache = newFieldsCache() + DefaultConverter = NewConverter(parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR"))) +) + +func parseBool(key string) bool { + if len(key) == 0 { + return false + } + value, err := strconv.ParseBool(key) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key)) + } + return value +} + +// ConverterImpl knows how to convert between interface{} and +// Unstructured in both ways. +type converterImpl struct { + // If true, we will be additionally running conversion via json + // to ensure that the result is true. + // This is supposed to be set only in tests. 
+ mismatchDetection bool +} + +func NewConverter(mismatchDetection bool) Converter { + return &converterImpl{ + mismatchDetection: mismatchDetection, + } +} + +func (c *converterImpl) FromUnstructured(u map[string]interface{}, obj interface{}) error { + t := reflect.TypeOf(obj) + value := reflect.ValueOf(obj) + if t.Kind() != reflect.Ptr || value.IsNil() { + return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t) + } + err := fromUnstructured(reflect.ValueOf(u), value.Elem()) + if c.mismatchDetection { + newObj := reflect.New(t.Elem()).Interface() + newErr := fromUnstructuredViaJSON(u, newObj) + if (err != nil) != (newErr != nil) { + glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) + } + if err == nil && !apiequality.Semantic.DeepEqual(obj, newObj) { + glog.Fatalf("FromUnstructured mismatch for %#v, diff: %v", obj, diff.ObjectReflectDiff(obj, newObj)) + } + } + return err +} + +func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error { + data, err := json.Marshal(u) + if err != nil { + return err + } + return json.Unmarshal(data, obj) +} + +func fromUnstructured(sv, dv reflect.Value) error { + sv = unwrapInterface(sv) + if !sv.IsValid() { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + st, dt := sv.Type(), dv.Type() + + switch dt.Kind() { + case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Struct, reflect.Interface: + // Those require non-trivial conversion. + default: + // This should handle all simple types. + if st.AssignableTo(dt) { + dv.Set(sv) + return nil + } + // We cannot simply use "ConvertibleTo", as JSON doesn't support conversions + // between those four groups: bools, integers, floats and string. We need to + // do the same. + if st.ConvertibleTo(dt) { + switch st.Kind() { + case reflect.String: + switch dt.Kind() { + case reflect.String: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Bool: + switch dt.Kind() { + case reflect.Bool: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch dt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Float32, reflect.Float64: + switch dt.Kind() { + case reflect.Float32, reflect.Float64: + dv.Set(sv.Convert(dt)) + return nil + } + if sv.Float() == math.Trunc(sv.Float()) { + dv.Set(sv.Convert(dt)) + return nil + } + } + return fmt.Errorf("cannot convert %s to %s", st.String(), dt.String()) + } + } + + // Check if the object has a custom JSON marshaller/unmarshaller. 
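+	// (Types that define their own UnmarshalJSON, e.g. metav1.Time or
+	// resource.Quantity, are round-tripped through JSON below so that their
+	// custom decoding applies instead of field-by-field copying.)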
+ if reflect.PtrTo(dt).Implements(unmarshalerType) { + data, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st.String(), err) + } + unmarshaler := dv.Addr().Interface().(encodingjson.Unmarshaler) + return unmarshaler.UnmarshalJSON(data) + } + + switch dt.Kind() { + case reflect.Map: + return mapFromUnstructured(sv, dv) + case reflect.Slice: + return sliceFromUnstructured(sv, dv) + case reflect.Ptr: + return pointerFromUnstructured(sv, dv) + case reflect.Struct: + return structFromUnstructured(sv, dv) + case reflect.Interface: + return interfaceFromUnstructured(sv, dv) + default: + return fmt.Errorf("unrecognized type: %v", dt.Kind()) + } +} + +func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo { + fieldCacheMap := fieldCache.value.Load().(fieldsCacheMap) + if info, ok := fieldCacheMap[structField{structType, field}]; ok { + return info + } + + // Cache miss - we need to compute the field name. + info := &fieldInfo{} + typeField := structType.Field(field) + jsonTag := typeField.Tag.Get("json") + if len(jsonTag) == 0 { + // Make the first character lowercase. + if typeField.Name == "" { + info.name = typeField.Name + } else { + info.name = strings.ToLower(typeField.Name[:1]) + typeField.Name[1:] + } + } else { + items := strings.Split(jsonTag, ",") + info.name = items[0] + for i := range items { + if items[i] == "omitempty" { + info.omitempty = true + } + } + } + info.nameValue = reflect.ValueOf(info.name) + + fieldCache.Lock() + defer fieldCache.Unlock() + fieldCacheMap = fieldCache.value.Load().(fieldsCacheMap) + newFieldCacheMap := make(fieldsCacheMap) + for k, v := range fieldCacheMap { + newFieldCacheMap[k] = v + } + newFieldCacheMap[structField{structType, field}] = info + fieldCache.value.Store(newFieldCacheMap) + return info +} + +func unwrapInterface(v reflect.Value) reflect.Value { + for v.Kind() == reflect.Interface { + v = v.Elem() + } + return v +} + +func mapFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() != reflect.Map { + return fmt.Errorf("cannot restore map from %v", st.Kind()) + } + + if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) { + return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key()) + } + + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeMap(dt)) + for _, key := range sv.MapKeys() { + value := reflect.New(dt.Elem()).Elem() + if val := unwrapInterface(sv.MapIndex(key)); val.IsValid() { + if err := fromUnstructured(val, value); err != nil { + return err + } + } else { + value.Set(reflect.Zero(dt.Elem())) + } + if st.Key().AssignableTo(dt.Key()) { + dv.SetMapIndex(key, value) + } else { + dv.SetMapIndex(key.Convert(dt.Key()), value) + } + } + return nil +} + +func sliceFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() == reflect.String && dt.Elem().Kind() == reflect.Uint8 { + // We store original []byte representation as string. + // This conversion is allowed, but we need to be careful about + // marshaling data appropriately. + if len(sv.Interface().(string)) > 0 { + marshalled, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st, err) + } + // TODO: Is this Unmarshal needed? 
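+			// (It appears to be: the unstructured form stores []byte as its
+			// base64 JSON string, and unmarshalling that string into []byte
+			// base64-decodes it back to the raw bytes.)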
+ var data []byte + err = json.Unmarshal(marshalled, &data) + if err != nil { + return fmt.Errorf("error decoding from json: %v", err) + } + dv.SetBytes(data) + } else { + dv.Set(reflect.Zero(dt)) + } + return nil + } + if st.Kind() != reflect.Slice { + return fmt.Errorf("cannot restore slice from %v", st.Kind()) + } + + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) + for i := 0; i < sv.Len(); i++ { + if err := fromUnstructured(sv.Index(i), dv.Index(i)); err != nil { + return err + } + } + return nil +} + +func pointerFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + + if st.Kind() == reflect.Ptr && sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.New(dt.Elem())) + switch st.Kind() { + case reflect.Ptr, reflect.Interface: + return fromUnstructured(sv.Elem(), dv.Elem()) + default: + return fromUnstructured(sv, dv.Elem()) + } +} + +func structFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() != reflect.Map { + return fmt.Errorf("cannot restore struct from: %v", st.Kind()) + } + + for i := 0; i < dt.NumField(); i++ { + fieldInfo := fieldInfoFromField(dt, i) + fv := dv.Field(i) + + if len(fieldInfo.name) == 0 { + // This field is inlined. + if err := fromUnstructured(sv, fv); err != nil { + return err + } + } else { + value := unwrapInterface(sv.MapIndex(fieldInfo.nameValue)) + if value.IsValid() { + if err := fromUnstructured(value, fv); err != nil { + return err + } + } else { + fv.Set(reflect.Zero(fv.Type())) + } + } + } + return nil +} + +func interfaceFromUnstructured(sv, dv reflect.Value) error { + // TODO: Is this conversion safe? + dv.Set(sv) + return nil +} + +func (c *converterImpl) ToUnstructured(obj interface{}, u *map[string]interface{}) error { + t := reflect.TypeOf(obj) + value := reflect.ValueOf(obj) + if t.Kind() != reflect.Ptr || value.IsNil() { + return fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) + } + err := toUnstructured(value.Elem(), reflect.ValueOf(u).Elem()) + if c.mismatchDetection { + newUnstr := &map[string]interface{}{} + newErr := toUnstructuredViaJSON(obj, newUnstr) + if (err != nil) != (newErr != nil) { + glog.Fatalf("ToUnstructured unexpected error for %v: error: %v", obj, err) + } + if err == nil && !apiequality.Semantic.DeepEqual(u, newUnstr) { + glog.Fatalf("ToUnstructured mismatch for %#v, diff: %v", u, diff.ObjectReflectDiff(u, newUnstr)) + } + } + return err +} + +func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error { + data, err := json.Marshal(obj) + if err != nil { + return err + } + return json.Unmarshal(data, u) +} + +func toUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + + // Check if the object has a custom JSON marshaller/unmarshaller. + if st.Implements(marshalerType) { + if sv.Kind() == reflect.Ptr && sv.IsNil() { + // We're done - we don't need to store anything. + return nil + } + + marshaler := sv.Interface().(encodingjson.Marshaler) + data, err := marshaler.MarshalJSON() + if err != nil { + return err + } + if bytes.Equal(data, []byte("null")) { + // We're done - we don't need to store anything. 
+ } else { + switch { + case len(data) > 0 && data[0] == '"': + var result string + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + case len(data) > 0 && data[0] == '{': + result := make(map[string]interface{}) + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + default: + var result int64 + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + } + } + return nil + } + + switch st.Kind() { + case reflect.String: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(stringType)) + } + dv.Set(reflect.ValueOf(sv.String())) + return nil + case reflect.Bool: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(boolType)) + } + dv.Set(reflect.ValueOf(sv.Bool())) + return nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(int64Type)) + } + dv.Set(reflect.ValueOf(sv.Int())) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(uint64Type)) + } + dv.Set(reflect.ValueOf(sv.Uint())) + return nil + case reflect.Float32, reflect.Float64: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(float64Type)) + } + dv.Set(reflect.ValueOf(sv.Float())) + return nil + case reflect.Map: + return mapToUnstructured(sv, dv) + case reflect.Slice: + return sliceToUnstructured(sv, dv) + case reflect.Ptr: + return pointerToUnstructured(sv, dv) + case reflect.Struct: + return structToUnstructured(sv, dv) + case reflect.Interface: + return interfaceToUnstructured(sv, dv) + default: + return fmt.Errorf("unrecognized type: %v", st.Kind()) + } +} + +func mapToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + if st.Key().Kind() == reflect.String { + switch st.Elem().Kind() { + // TODO It should be possible to reuse the slice for primitive types. + // However, it is panicing in the following form. + // case reflect.String, reflect.Bool, + // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // sv.Set(sv) + // return nil + default: + // We need to do a proper conversion. 
+ } + } + dv.Set(reflect.MakeMap(mapStringInterfaceType)) + dv = dv.Elem() + dt = dv.Type() + } + if dt.Kind() != reflect.Map { + return fmt.Errorf("cannot convert struct to: %v", dt.Kind()) + } + + if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) { + return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key()) + } + + for _, key := range sv.MapKeys() { + value := reflect.New(dt.Elem()).Elem() + if err := toUnstructured(sv.MapIndex(key), value); err != nil { + return err + } + if st.Key().AssignableTo(dt.Key()) { + dv.SetMapIndex(key, value) + } else { + dv.SetMapIndex(key.Convert(dt.Key()), value) + } + } + return nil +} + +func sliceToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + if st.Elem().Kind() == reflect.Uint8 { + dv.Set(reflect.New(stringType)) + data, err := json.Marshal(sv.Bytes()) + if err != nil { + return err + } + var result string + if err = json.Unmarshal(data, &result); err != nil { + return err + } + dv.Set(reflect.ValueOf(result)) + return nil + } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + switch st.Elem().Kind() { + // TODO It should be possible to reuse the slice for primitive types. + // However, it is panicing in the following form. + // case reflect.String, reflect.Bool, + // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // sv.Set(sv) + // return nil + default: + // We need to do a proper conversion. + dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap())) + dv = dv.Elem() + dt = dv.Type() + } + } + if dt.Kind() != reflect.Slice { + return fmt.Errorf("cannot convert slice to: %v", dt.Kind()) + } + for i := 0; i < sv.Len(); i++ { + if err := toUnstructured(sv.Index(i), dv.Index(i)); err != nil { + return err + } + } + return nil +} + +func pointerToUnstructured(sv, dv reflect.Value) error { + if sv.IsNil() { + // We're done - we don't need to store anything. + return nil + } + return toUnstructured(sv.Elem(), dv) +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Map, reflect.Slice: + // TODO: It seems that 0-len maps are ignored in it. + return v.IsNil() || v.Len() == 0 + case reflect.Ptr, reflect.Interface: + return v.IsNil() + } + return false +} + +func structToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.MakeMap(mapStringInterfaceType)) + dv = dv.Elem() + dt = dv.Type() + } + if dt.Kind() != reflect.Map { + return fmt.Errorf("cannot convert struct to: %v", dt.Kind()) + } + realMap := dv.Interface().(map[string]interface{}) + + for i := 0; i < st.NumField(); i++ { + fieldInfo := fieldInfoFromField(st, i) + fv := sv.Field(i) + + if fieldInfo.name == "-" { + // This field should be skipped. + continue + } + if fieldInfo.omitempty && isZero(fv) { + // omitempty fields should be ignored. + continue + } + if len(fieldInfo.name) == 0 { + // This field is inlined. 
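+			// (An empty json name means the field is embedded/",inline", so its
+			// members are written directly into the parent map dv.)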
+ if err := toUnstructured(fv, dv); err != nil { + return err + } + continue + } + switch fv.Type().Kind() { + case reflect.String: + realMap[fieldInfo.name] = fv.String() + case reflect.Bool: + realMap[fieldInfo.name] = fv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + realMap[fieldInfo.name] = fv.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + realMap[fieldInfo.name] = fv.Uint() + case reflect.Float32, reflect.Float64: + realMap[fieldInfo.name] = fv.Float() + default: + subv := reflect.New(dt.Elem()).Elem() + if err := toUnstructured(fv, subv); err != nil { + return err + } + dv.SetMapIndex(fieldInfo.nameValue, subv) + } + } + return nil +} + +func interfaceToUnstructured(sv, dv reflect.Value) error { + if !sv.IsValid() || sv.IsNil() { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + return toUnstructured(sv.Elem(), dv) +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go new file mode 100644 index 000000000..cd40e74be --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package unstructured provides conversion from runtime objects +// to map[string]interface{} representation. +package unstructured // import "k8s.io/apimachinery/pkg/conversion/unstructured" diff --git a/vendor/k8s.io/apimachinery/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go new file mode 100644 index 000000000..c39b8039a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fields implements a simple field system, parsing and matching +// selectors with sets of fields. +package fields // import "k8s.io/apimachinery/pkg/fields" diff --git a/vendor/k8s.io/apimachinery/pkg/fields/fields.go b/vendor/k8s.io/apimachinery/pkg/fields/fields.go new file mode 100644 index 000000000..623b27e95 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/fields.go @@ -0,0 +1,62 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import ( + "sort" + "strings" +) + +// Fields allows you to present fields independently from their storage. +type Fields interface { + // Has returns whether the provided field exists. + Has(field string) (exists bool) + + // Get returns the value for the provided field. + Get(field string) (value string) +} + +// Set is a map of field:value. It implements Fields. +type Set map[string]string + +// String returns all fields listed as a human readable string. +// Conveniently, exactly the format that ParseSelector takes. +func (ls Set) String() string { + selector := make([]string, 0, len(ls)) + for key, value := range ls { + selector = append(selector, key+"="+value) + } + // Sort for determinism. + sort.StringSlice(selector).Sort() + return strings.Join(selector, ",") +} + +// Has returns whether the provided field exists in the map. +func (ls Set) Has(field string) bool { + _, exists := ls[field] + return exists +} + +// Get returns the value in the map for the provided field. +func (ls Set) Get(field string) string { + return ls[field] +} + +// AsSelector converts fields into a selectors. +func (ls Set) AsSelector() Selector { + return SelectorFromSet(ls) +} diff --git a/vendor/k8s.io/apimachinery/pkg/fields/requirements.go b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go new file mode 100644 index 000000000..70d94ded8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go @@ -0,0 +1,30 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import "k8s.io/apimachinery/pkg/selection" + +// Requirements is AND of all requirements. +type Requirements []Requirement + +// Requirement contains a field, a value, and an operator that relates the field and value. +// This is currently for reading internal selection information of field selector. +type Requirement struct { + Operator selection.Operator + Field string + Value string +} diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go new file mode 100644 index 000000000..1305dde08 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go @@ -0,0 +1,423 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/selection" +) + +// Selector represents a field selector. +type Selector interface { + // Matches returns true if this selector matches the given set of fields. + Matches(Fields) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific field to be set, and if so returns the value it + // requires. + RequiresExactMatch(field string) (value string, found bool) + + // Transform returns a new copy of the selector after TransformFunc has been + // applied to the entire selector, or an error if fn returns an error. + // If for a given requirement both field and value are transformed to empty + // string, the requirement is skipped. + Transform(fn TransformFunc) (Selector, error) + + // Requirements converts this interface to Requirements to expose + // more detailed selection information. + Requirements() Requirements + + // String returns a human readable string that represents this selector. + String() string +} + +// Everything returns a selector that matches all fields. +func Everything() Selector { + return andTerm{} +} + +type hasTerm struct { + field, value string +} + +func (t *hasTerm) Matches(ls Fields) bool { + return ls.Get(t.field) == t.value +} + +func (t *hasTerm) Empty() bool { + return false +} + +func (t *hasTerm) RequiresExactMatch(field string) (value string, found bool) { + if t.field == field { + return t.value, true + } + return "", false +} + +func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) { + field, value, err := fn(t.field, t.value) + if err != nil { + return nil, err + } + if len(field) == 0 && len(value) == 0 { + return Everything(), nil + } + return &hasTerm{field, value}, nil +} + +func (t *hasTerm) Requirements() Requirements { + return []Requirement{{ + Field: t.field, + Operator: selection.Equals, + Value: t.value, + }} +} + +func (t *hasTerm) String() string { + return fmt.Sprintf("%v=%v", t.field, EscapeValue(t.value)) +} + +type notHasTerm struct { + field, value string +} + +func (t *notHasTerm) Matches(ls Fields) bool { + return ls.Get(t.field) != t.value +} + +func (t *notHasTerm) Empty() bool { + return false +} + +func (t *notHasTerm) RequiresExactMatch(field string) (value string, found bool) { + return "", false +} + +func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) { + field, value, err := fn(t.field, t.value) + if err != nil { + return nil, err + } + if len(field) == 0 && len(value) == 0 { + return Everything(), nil + } + return ¬HasTerm{field, value}, nil +} + +func (t *notHasTerm) Requirements() Requirements { + return []Requirement{{ + Field: t.field, + Operator: selection.NotEquals, + Value: t.value, + }} +} + +func (t *notHasTerm) String() string { + return fmt.Sprintf("%v!=%v", t.field, EscapeValue(t.value)) +} + +type andTerm []Selector + +func (t andTerm) Matches(ls Fields) bool { + for _, q := range t { + if !q.Matches(ls) { + return false + } + } + return 
true +} + +func (t andTerm) Empty() bool { + if t == nil { + return true + } + if len([]Selector(t)) == 0 { + return true + } + for i := range t { + if !t[i].Empty() { + return false + } + } + return true +} + +func (t andTerm) RequiresExactMatch(field string) (string, bool) { + if t == nil || len([]Selector(t)) == 0 { + return "", false + } + for i := range t { + if value, found := t[i].RequiresExactMatch(field); found { + return value, found + } + } + return "", false +} + +func (t andTerm) Transform(fn TransformFunc) (Selector, error) { + next := make([]Selector, 0, len([]Selector(t))) + for _, s := range []Selector(t) { + n, err := s.Transform(fn) + if err != nil { + return nil, err + } + if !n.Empty() { + next = append(next, n) + } + } + return andTerm(next), nil +} + +func (t andTerm) Requirements() Requirements { + reqs := make([]Requirement, 0, len(t)) + for _, s := range []Selector(t) { + rs := s.Requirements() + reqs = append(reqs, rs...) + } + return reqs +} + +func (t andTerm) String() string { + var terms []string + for _, q := range t { + terms = append(terms, q.String()) + } + return strings.Join(terms, ",") +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil Set is considered equivalent to Everything(). +func SelectorFromSet(ls Set) Selector { + if ls == nil { + return Everything() + } + items := make([]Selector, 0, len(ls)) + for field, value := range ls { + items = append(items, &hasTerm{field: field, value: value}) + } + if len(items) == 1 { + return items[0] + } + return andTerm(items) +} + +// valueEscaper prefixes \,= characters with a backslash +var valueEscaper = strings.NewReplacer( + // escape \ characters + `\`, `\\`, + // then escape , and = characters to allow unambiguous parsing of the value in a fieldSelector + `,`, `\,`, + `=`, `\=`, +) + +// EscapeValue escapes an arbitrary literal string for use as a fieldSelector value +func EscapeValue(s string) string { + return valueEscaper.Replace(s) +} + +// InvalidEscapeSequence indicates an error occurred unescaping a field selector +type InvalidEscapeSequence struct { + sequence string +} + +func (i InvalidEscapeSequence) Error() string { + return fmt.Sprintf("invalid field selector: invalid escape sequence: %s", i.sequence) +} + +// UnescapedRune indicates an error occurred unescaping a field selector +type UnescapedRune struct { + r rune +} + +func (i UnescapedRune) Error() string { + return fmt.Sprintf("invalid field selector: unescaped character in value: %v", i.r) +} + +// UnescapeValue unescapes a fieldSelector value and returns the original literal value. +// May return the original string if it contains no escaped or special characters. 
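Illustrative aside (not part of the vendored file): the escape/unescape round trip, and how a selector prints escaped values, looks roughly like this with invented values.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	raw := `quoted="a,b"`

	escaped := fields.EscapeValue(raw)
	fmt.Println(escaped) // quoted\="a\,b"

	back, err := fields.UnescapeValue(escaped)
	if err != nil {
		panic(err)
	}
	fmt.Println(back == raw) // true

	// hasTerm.String escapes the value, so the printed selector re-parses cleanly.
	sel := fields.OneTermEqualSelector("metadata.name", raw)
	fmt.Println(sel.String()) // metadata.name=quoted\="a\,b"
}
```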
+func UnescapeValue(s string) (string, error) { + // if there's no escaping or special characters, just return to avoid allocation + if !strings.ContainsAny(s, `\,=`) { + return s, nil + } + + v := bytes.NewBuffer(make([]byte, 0, len(s))) + inSlash := false + for _, c := range s { + if inSlash { + switch c { + case '\\', ',', '=': + // omit the \ for recognized escape sequences + v.WriteRune(c) + default: + // error on unrecognized escape sequences + return "", InvalidEscapeSequence{sequence: string([]rune{'\\', c})} + } + inSlash = false + continue + } + + switch c { + case '\\': + inSlash = true + case ',', '=': + // unescaped , and = characters are not allowed in field selector values + return "", UnescapedRune{r: c} + default: + v.WriteRune(c) + } + } + + // Ending with a single backslash is an invalid sequence + if inSlash { + return "", InvalidEscapeSequence{sequence: "\\"} + } + + return v.String(), nil +} + +// ParseSelectorOrDie takes a string representing a selector and returns an +// object suitable for matching, or panic when an error occur. +func ParseSelectorOrDie(s string) Selector { + selector, err := ParseSelector(s) + if err != nil { + panic(err) + } + return selector +} + +// ParseSelector takes a string representing a selector and returns an +// object suitable for matching, or an error. +func ParseSelector(selector string) (Selector, error) { + return parseSelector(selector, + func(lhs, rhs string) (newLhs, newRhs string, err error) { + return lhs, rhs, nil + }) +} + +// ParseAndTransformSelector parses the selector and runs them through the given TransformFunc. +func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) { + return parseSelector(selector, fn) +} + +// TransformFunc transforms selectors. +type TransformFunc func(field, value string) (newField, newValue string, err error) + +// splitTerms returns the comma-separated terms contained in the given fieldSelector. +// Backslash-escaped commas are treated as data instead of delimiters, and are included in the returned terms, with the leading backslash preserved. +func splitTerms(fieldSelector string) []string { + if len(fieldSelector) == 0 { + return nil + } + + terms := make([]string, 0, 1) + startIndex := 0 + inSlash := false + for i, c := range fieldSelector { + switch { + case inSlash: + inSlash = false + case c == '\\': + inSlash = true + case c == ',': + terms = append(terms, fieldSelector[startIndex:i]) + startIndex = i + 1 + } + } + + terms = append(terms, fieldSelector[startIndex:]) + + return terms +} + +const ( + notEqualOperator = "!=" + doubleEqualOperator = "==" + equalOperator = "=" +) + +// termOperators holds the recognized operators supported in fieldSelectors. +// doubleEqualOperator and equal are equivalent, but doubleEqualOperator is checked first +// to avoid leaving a leading = character on the rhs value. +var termOperators = []string{notEqualOperator, doubleEqualOperator, equalOperator} + +// splitTerm returns the lhs, operator, and rhs parsed from the given term, along with an indicator of whether the parse was successful. +// no escaping of special characters is supported in the lhs value, so the first occurance of a recognized operator is used as the split point. +// the literal rhs is returned, and the caller is responsible for applying any desired unescaping. 
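Illustrative aside (not part of the vendored file): parsing and evaluating a field selector with these helpers might look like this; the field names and values are invented.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	sel, err := fields.ParseSelector("status.phase==Running,metadata.namespace!=kube-system")
	if err != nil {
		panic(err)
	}

	pod := fields.Set{
		"status.phase":       "Running",
		"metadata.namespace": "default",
	}
	fmt.Println(sel.Matches(pod)) // true

	// A backslash-escaped '=' survives splitTerms/splitTerm and is unescaped afterwards.
	sel2 := fields.ParseSelectorOrDie(`spec.nodeName=node\=1`)
	v, _ := sel2.RequiresExactMatch("spec.nodeName")
	fmt.Println(v) // node=1
}
```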
+func splitTerm(term string) (lhs, op, rhs string, ok bool) { + for i := range term { + remaining := term[i:] + for _, op := range termOperators { + if strings.HasPrefix(remaining, op) { + return term[0:i], op, term[i+len(op):], true + } + } + } + return "", "", "", false +} + +func parseSelector(selector string, fn TransformFunc) (Selector, error) { + parts := splitTerms(selector) + sort.StringSlice(parts).Sort() + var items []Selector + for _, part := range parts { + if part == "" { + continue + } + lhs, op, rhs, ok := splitTerm(part) + if !ok { + return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part) + } + unescapedRHS, err := UnescapeValue(rhs) + if err != nil { + return nil, err + } + switch op { + case notEqualOperator: + items = append(items, ¬HasTerm{field: lhs, value: unescapedRHS}) + case doubleEqualOperator: + items = append(items, &hasTerm{field: lhs, value: unescapedRHS}) + case equalOperator: + items = append(items, &hasTerm{field: lhs, value: unescapedRHS}) + default: + return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part) + } + } + if len(items) == 1 { + return items[0].Transform(fn) + } + return andTerm(items).Transform(fn) +} + +// OneTermEqualSelector returns an object that matches objects where one field/field equals one value. +// Cannot return an error. +func OneTermEqualSelector(k, v string) Selector { + return &hasTerm{field: k, value: v} +} + +// AndSelectors creates a selector that is the logical AND of all the given selectors +func AndSelectors(selectors ...Selector) Selector { + return andTerm(selectors) +} diff --git a/vendor/k8s.io/apimachinery/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go new file mode 100644 index 000000000..82de0051b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package labels implements a simple label system, parsing and matching +// selectors with sets of labels. +package labels // import "k8s.io/apimachinery/pkg/labels" diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go new file mode 100644 index 000000000..32db4d96f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -0,0 +1,181 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package labels + +import ( + "fmt" + "sort" + "strings" +) + +// Labels allows you to present labels independently from their storage. +type Labels interface { + // Has returns whether the provided label exists. + Has(label string) (exists bool) + + // Get returns the value for the provided label. + Get(label string) (value string) +} + +// Set is a map of label:value. It implements Labels. +type Set map[string]string + +// String returns all labels listed as a human readable string. +// Conveniently, exactly the format that ParseSelector takes. +func (ls Set) String() string { + selector := make([]string, 0, len(ls)) + for key, value := range ls { + selector = append(selector, key+"="+value) + } + // Sort for determinism. + sort.StringSlice(selector).Sort() + return strings.Join(selector, ",") +} + +// Has returns whether the provided label exists in the map. +func (ls Set) Has(label string) bool { + _, exists := ls[label] + return exists +} + +// Get returns the value in the map for the provided label. +func (ls Set) Get(label string) string { + return ls[label] +} + +// AsSelector converts labels into a selectors. +func (ls Set) AsSelector() Selector { + return SelectorFromSet(ls) +} + +// AsSelectorPreValidated converts labels into a selector, but +// assumes that labels are already validated and thus don't +// preform any validation. +// According to our measurements this is significantly faster +// in codepaths that matter at high scale. +func (ls Set) AsSelectorPreValidated() Selector { + return SelectorFromValidatedSet(ls) +} + +// FormatLabels convert label map into plain string +func FormatLabels(labelMap map[string]string) string { + l := Set(labelMap).String() + if l == "" { + l = "" + } + return l +} + +// Conflicts takes 2 maps and returns true if there a key match between +// the maps but the value doesn't match, and returns false in other cases +func Conflicts(labels1, labels2 Set) bool { + small := labels1 + big := labels2 + if len(labels2) < len(labels1) { + small = labels2 + big = labels1 + } + + for k, v := range small { + if val, match := big[k]; match { + if val != v { + return true + } + } + } + + return false +} + +// Merge combines given maps, and does not check for any conflicts +// between the maps. In case of conflicts, second map (labels2) wins +func Merge(labels1, labels2 Set) Set { + mergedMap := Set{} + + for k, v := range labels1 { + mergedMap[k] = v + } + for k, v := range labels2 { + mergedMap[k] = v + } + return mergedMap +} + +// Equals returns true if the given maps are equal +func Equals(labels1, labels2 Set) bool { + if len(labels1) != len(labels2) { + return false + } + + for k, v := range labels1 { + value, ok := labels2[k] + if !ok { + return false + } + if value != v { + return false + } + } + return true +} + +// AreLabelsInWhiteList verifies if the provided label list +// is in the provided whitelist and returns true, otherwise false. 
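`labels.Set` is the workhorse type of this package: it satisfies the `Labels` interface and carries the small helpers used throughout the file (`String`, `Has`, `Get`, `Conflicts`, `Merge`, `Equals`, `AsSelector`). A short illustrative sketch against the vendored package:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	base := labels.Set{"app": "web", "tier": "frontend"}
	extra := labels.Set{"tier": "backend", "env": "prod"}

	fmt.Println(base.String())                     // "app=web,tier=frontend" (sorted)
	fmt.Println(base.Has("app"), base.Get("tier")) // true frontend

	// Conflicts reports a shared key with different values; Merge lets the second map win.
	fmt.Println(labels.Conflicts(base, extra)) // true (tier differs)
	merged := labels.Merge(base, extra)
	fmt.Println(merged["tier"], merged["env"]) // backend prod

	// A Set converts directly into a Selector that matches itself.
	sel := base.AsSelector()
	fmt.Println(sel.Matches(base)) // true
}
```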
+func AreLabelsInWhiteList(labels, whitelist Set) bool { + if len(whitelist) == 0 { + return true + } + + for k, v := range labels { + value, ok := whitelist[k] + if !ok { + return false + } + if value != v { + return false + } + } + return true +} + +// ConvertSelectorToLabelsMap converts selector string to labels map +// and validates keys and values +func ConvertSelectorToLabelsMap(selector string) (Set, error) { + labelsMap := Set{} + + if len(selector) == 0 { + return labelsMap, nil + } + + labels := strings.Split(selector, ",") + for _, label := range labels { + l := strings.Split(label, "=") + if len(l) != 2 { + return labelsMap, fmt.Errorf("invalid selector: %s", l) + } + key := strings.TrimSpace(l[0]) + if err := validateLabelKey(key); err != nil { + return labelsMap, err + } + value := strings.TrimSpace(l[1]) + if err := validateLabelValue(value); err != nil { + return labelsMap, err + } + labelsMap[key] = value + } + return labelsMap, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go new file mode 100644 index 000000000..50b41f99d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -0,0 +1,859 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" +) + +// Requirements is AND of all requirements. +type Requirements []Requirement + +// Selector represents a label selector. +type Selector interface { + // Matches returns true if this selector matches the given set of labels. + Matches(Labels) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // String returns a human readable string that represents this selector. + String() string + + // Add adds requirements to the Selector + Add(r ...Requirement) Selector + + // Requirements converts this interface into Requirements to expose + // more detailed selection information. + // If there are querying parameters, it will return converted requirements and selectable=true. + // If this selector doesn't want to select anything, it will return selectable=false. + Requirements() (requirements Requirements, selectable bool) +} + +// Everything returns a selector that matches all labels. 
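`ConvertSelectorToLabelsMap` only understands plain, comma-separated `key=value` pairs and validates both sides; set-based expressions have to go through `Parse`, defined later in selector.go. An illustrative sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Simple equality pairs are accepted; keys and values are validated and trimmed.
	m, err := labels.ConvertSelectorToLabelsMap("app=web, tier=frontend")
	fmt.Println(m, err) // map[app:web tier:frontend] <nil>

	// Set-based expressions are rejected by this helper (use labels.Parse for those).
	_, err = labels.ConvertSelectorToLabelsMap("env in (prod)")
	fmt.Println(err != nil) // true
}
```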
+func Everything() Selector { + return internalSelector{} +} + +type nothingSelector struct{} + +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } + +// Nothing returns a selector that matches no labels +func Nothing() Selector { + return nothingSelector{} +} + +// NewSelector returns a nil selector +func NewSelector() Selector { + return internalSelector(nil) +} + +type internalSelector []Requirement + +// ByKey sorts requirements by key to obtain deterministic parser +type ByKey []Requirement + +func (a ByKey) Len() int { return len(a) } + +func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } + +// Requirement contains values, a key, and an operator that relates the key and values. +// The zero value of Requirement is invalid. +// Requirement implements both set based match and exact match +// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. +type Requirement struct { + key string + operator selection.Operator + // In huge majority of cases we have at most one value here. + // It is generally faster to operate on a single-element slice + // than on a single-element map, so we have a slice here. + strValues []string +} + +// NewRequirement is the constructor for a Requirement. +// If any of these rules is violated, an error is returned: +// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. +// (2) If the operator is In or NotIn, the values set must be non-empty. +// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// (4) If the operator is Exists or DoesNotExist, the value set must be empty. +// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// (6) The key is invalid due to its length, or sequence +// of characters. See validateLabelKey for more details. +// +// The empty string is a valid value in the input values set. 
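Requirements are the programmatic way to build a selector without the string parser: construct them with `NewRequirement`, which enforces the operator rules listed above, and combine them with `NewSelector().Add(...)`. A sketch using the vendored `labels` and `selection` packages:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// env must be one of prod/staging, and the canary label must not exist.
	env, err := labels.NewRequirement("env", selection.In, []string{"prod", "staging"})
	if err != nil {
		panic(err)
	}
	noCanary, err := labels.NewRequirement("canary", selection.DoesNotExist, []string{})
	if err != nil {
		panic(err)
	}

	sel := labels.NewSelector().Add(*env, *noCanary)
	fmt.Println(sel.String()) // "!canary,env in (prod,staging)" (sorted by key)

	fmt.Println(sel.Matches(labels.Set{"env": "prod"}))                  // true
	fmt.Println(sel.Matches(labels.Set{"env": "prod", "canary": "yes"})) // false
	fmt.Println(sel.Matches(labels.Set{"env": "dev"}))                   // false
}
```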
+func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) { + if err := validateLabelKey(key); err != nil { + return nil, err + } + switch op { + case selection.In, selection.NotIn: + if len(vals) == 0 { + return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty") + } + case selection.Equals, selection.DoubleEquals, selection.NotEquals: + if len(vals) != 1 { + return nil, fmt.Errorf("exact-match compatibility requires one single value") + } + case selection.Exists, selection.DoesNotExist: + if len(vals) != 0 { + return nil, fmt.Errorf("values set must be empty for exists and does not exist") + } + case selection.GreaterThan, selection.LessThan: + if len(vals) != 1 { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required") + } + for i := range vals { + if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer") + } + } + default: + return nil, fmt.Errorf("operator '%v' is not recognized", op) + } + + for i := range vals { + if err := validateLabelValue(vals[i]); err != nil { + return nil, err + } + } + sort.Strings(vals) + return &Requirement{key: key, operator: op, strValues: vals}, nil +} + +func (r *Requirement) hasValue(value string) bool { + for i := range r.strValues { + if r.strValues[i] == value { + return true + } + } + return false +} + +// Matches returns true if the Requirement matches the input Labels. +// There is a match in the following cases: +// (1) The operator is Exists and Labels has the Requirement's key. +// (2) The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// (3) The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// (4) The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. +func (r *Requirement) Matches(ls Labels) bool { + switch r.operator { + case selection.In, selection.Equals, selection.DoubleEquals: + if !ls.Has(r.key) { + return false + } + return r.hasValue(ls.Get(r.key)) + case selection.NotIn, selection.NotEquals: + if !ls.Has(r.key) { + return true + } + return !r.hasValue(ls.Get(r.key)) + case selection.Exists: + return ls.Has(r.key) + case selection.DoesNotExist: + return !ls.Has(r.key) + case selection.GreaterThan, selection.LessThan: + if !ls.Has(r.key) { + return false + } + lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) + if err != nil { + glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + return false + } + + // There should be only one strValue in r.strValues, and can be converted to a integer. 
+ if len(r.strValues) != 1 { + glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + return false + } + + var rValue int64 + for i := range r.strValues { + rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) + if err != nil { + glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + return false + } + } + return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue) + default: + return false + } +} + +// Key returns requirement key +func (r *Requirement) Key() string { + return r.key +} + +// Operator returns requirement operator +func (r *Requirement) Operator() selection.Operator { + return r.operator +} + +// Values returns requirement values +func (r *Requirement) Values() sets.String { + ret := sets.String{} + for i := range r.strValues { + ret.Insert(r.strValues[i]) + } + return ret +} + +// Empty returns true if the internalSelector doesn't restrict selection space +func (lsel internalSelector) Empty() bool { + if lsel == nil { + return true + } + return len(lsel) == 0 +} + +// String returns a human-readable string that represents this +// Requirement. If called on an invalid Requirement, an error is +// returned. See NewRequirement for creating a valid Requirement. +func (r *Requirement) String() string { + var buffer bytes.Buffer + if r.operator == selection.DoesNotExist { + buffer.WriteString("!") + } + buffer.WriteString(r.key) + + switch r.operator { + case selection.Equals: + buffer.WriteString("=") + case selection.DoubleEquals: + buffer.WriteString("==") + case selection.NotEquals: + buffer.WriteString("!=") + case selection.In: + buffer.WriteString(" in ") + case selection.NotIn: + buffer.WriteString(" notin ") + case selection.GreaterThan: + buffer.WriteString(">") + case selection.LessThan: + buffer.WriteString("<") + case selection.Exists, selection.DoesNotExist: + return buffer.String() + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString("(") + } + if len(r.strValues) == 1 { + buffer.WriteString(r.strValues[0]) + } else { // only > 1 since == 0 prohibited by NewRequirement + buffer.WriteString(strings.Join(r.strValues, ",")) + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString(")") + } + return buffer.String() +} + +// Add adds requirements to the selector. It copies the current selector returning a new one +func (lsel internalSelector) Add(reqs ...Requirement) Selector { + var sel internalSelector + for ix := range lsel { + sel = append(sel, lsel[ix]) + } + for _, r := range reqs { + sel = append(sel, r) + } + sort.Sort(ByKey(sel)) + return sel +} + +// Matches for a internalSelector returns true if all +// its Requirements match the input Labels. If any +// Requirement does not match, false is returned. +func (lsel internalSelector) Matches(l Labels) bool { + for ix := range lsel { + if matches := lsel[ix].Matches(l); !matches { + return false + } + } + return true +} + +func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } + +// String returns a comma-separated string of all +// the internalSelector Requirements' human-readable strings. 
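The `Gt`/`Lt` branch above treats both the requirement value and the label value as integers; a non-numeric label value simply fails to match rather than surfacing an error. An illustrative sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// Gt/Lt take exactly one value, which must parse as an integer; label values
	// are compared numerically at match time.
	minReplicas, err := labels.NewRequirement("replicas", selection.GreaterThan, []string{"3"})
	if err != nil {
		panic(err)
	}
	fmt.Println(minReplicas.String())                               // "replicas>3"
	fmt.Println(minReplicas.Matches(labels.Set{"replicas": "5"}))   // true
	fmt.Println(minReplicas.Matches(labels.Set{"replicas": "2"}))   // false
	fmt.Println(minReplicas.Matches(labels.Set{"replicas": "lots"})) // false (not an integer)

	// Accessors expose the parsed pieces of a requirement.
	fmt.Println(minReplicas.Key(), minReplicas.Operator(), minReplicas.Values().List())
}
```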
+func (lsel internalSelector) String() string { + var reqs []string + for ix := range lsel { + reqs = append(reqs, lsel[ix].String()) + } + return strings.Join(reqs, ",") +} + +// Token represents constant definition for lexer token +type Token int + +const ( + // ErrorToken represents scan error + ErrorToken Token = iota + // EndOfStringToken represents end of string + EndOfStringToken + // ClosedParToken represents close parenthesis + ClosedParToken + // CommaToken represents the comma + CommaToken + // DoesNotExistToken represents logic not + DoesNotExistToken + // DoubleEqualsToken represents double equals + DoubleEqualsToken + // EqualsToken represents equal + EqualsToken + // GreaterThanToken represents greater than + GreaterThanToken + // IdentifierToken represents identifier, e.g. keys and values + IdentifierToken + // InToken represents in + InToken + // LessThanToken represents less than + LessThanToken + // NotEqualsToken represents not equal + NotEqualsToken + // NotInToken represents not in + NotInToken + // OpenParToken represents open parenthesis + OpenParToken +) + +// string2token contains the mapping between lexer Token and token literal +// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) +var string2token = map[string]Token{ + ")": ClosedParToken, + ",": CommaToken, + "!": DoesNotExistToken, + "==": DoubleEqualsToken, + "=": EqualsToken, + ">": GreaterThanToken, + "in": InToken, + "<": LessThanToken, + "!=": NotEqualsToken, + "notin": NotInToken, + "(": OpenParToken, +} + +// ScannedItem contains the Token and the literal produced by the lexer. +type ScannedItem struct { + tok Token + literal string +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch byte) bool { + return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' +} + +// isSpecialSymbol detect if the character ch can be an operator +func isSpecialSymbol(ch byte) bool { + switch ch { + case '=', '!', '(', ')', ',', '>', '<': + return true + } + return false +} + +// Lexer represents the Lexer struct for label selector. +// It contains necessary informationt to tokenize the input string +type Lexer struct { + // s stores the string to be tokenized + s string + // pos is the position currently tokenized + pos int +} + +// read return the character currently lexed +// increment the position and check the buffer overflow +func (l *Lexer) read() (b byte) { + b = 0 + if l.pos < len(l.s) { + b = l.s[l.pos] + l.pos++ + } + return b +} + +// unread 'undoes' the last read character +func (l *Lexer) unread() { + l.pos-- +} + +// scanIDOrKeyword scans string to recognize literal token (for example 'in') or an identifier. +func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) { + var buffer []byte +IdentifierLoop: + for { + switch ch := l.read(); { + case ch == 0: + break IdentifierLoop + case isSpecialSymbol(ch) || isWhitespace(ch): + l.unread() + break IdentifierLoop + default: + buffer = append(buffer, ch) + } + } + s := string(buffer) + if val, ok := string2token[s]; ok { // is a literal token? + return val, s + } + return IdentifierToken, s // otherwise is an identifier +} + +// scanSpecialSymbol scans string starting with special symbol. +// special symbol identify non literal operators. 
"!=", "==", "=" +func (l *Lexer) scanSpecialSymbol() (Token, string) { + lastScannedItem := ScannedItem{} + var buffer []byte +SpecialSymbolLoop: + for { + switch ch := l.read(); { + case ch == 0: + break SpecialSymbolLoop + case isSpecialSymbol(ch): + buffer = append(buffer, ch) + if token, ok := string2token[string(buffer)]; ok { + lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} + } else if lastScannedItem.tok != 0 { + l.unread() + break SpecialSymbolLoop + } + default: + l.unread() + break SpecialSymbolLoop + } + } + if lastScannedItem.tok == 0 { + return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) + } + return lastScannedItem.tok, lastScannedItem.literal +} + +// skipWhiteSpaces consumes all blank characters +// returning the first non blank character +func (l *Lexer) skipWhiteSpaces(ch byte) byte { + for { + if !isWhitespace(ch) { + return ch + } + ch = l.read() + } +} + +// Lex returns a pair of Token and the literal +// literal is meaningfull only for IdentifierToken token +func (l *Lexer) Lex() (tok Token, lit string) { + switch ch := l.skipWhiteSpaces(l.read()); { + case ch == 0: + return EndOfStringToken, "" + case isSpecialSymbol(ch): + l.unread() + return l.scanSpecialSymbol() + default: + l.unread() + return l.scanIDOrKeyword() + } +} + +// Parser data structure contains the label selector parser data structure +type Parser struct { + l *Lexer + scannedItems []ScannedItem + position int +} + +// ParserContext represents context during parsing: +// some literal for example 'in' and 'notin' can be +// recognized as operator for example 'x in (a)' but +// it can be recognized as value for example 'value in (in)' +type ParserContext int + +const ( + // KeyAndOperator represents key and operator + KeyAndOperator ParserContext = iota + // Values represents values + Values +) + +// lookahead func returns the current token and string. No increment of current position +func (p *Parser) lookahead(context ParserContext) (Token, string) { + tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// consume returns current token and string. Increments the the position +func (p *Parser) consume(context ParserContext) (Token, string) { + p.position++ + tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// scan runs through the input string and stores the ScannedItem in an array +// Parser can now lookahead and consume the tokens +func (p *Parser) scan() { + for { + token, literal := p.l.Lex() + p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) + if token == EndOfStringToken { + break + } + } +} + +// parse runs the left recursive descending algorithm +// on input string. It returns a list of Requirement objects. 
+func (p *Parser) parse() (internalSelector, error) { + p.scan() // init scannedItems + + var requirements internalSelector + for { + tok, lit := p.lookahead(Values) + switch tok { + case IdentifierToken, DoesNotExistToken: + r, err := p.parseRequirement() + if err != nil { + return nil, fmt.Errorf("unable to parse requirement: %v", err) + } + requirements = append(requirements, *r) + t, l := p.consume(Values) + switch t { + case EndOfStringToken: + return requirements, nil + case CommaToken: + t2, l2 := p.lookahead(Values) + if t2 != IdentifierToken && t2 != DoesNotExistToken { + return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) + } + default: + return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) + } + case EndOfStringToken: + return requirements, nil + default: + return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit) + } + } +} + +func (p *Parser) parseRequirement() (*Requirement, error) { + key, operator, err := p.parseKeyAndInferOperator() + if err != nil { + return nil, err + } + if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked + return NewRequirement(key, operator, []string{}) + } + operator, err = p.parseOperator() + if err != nil { + return nil, err + } + var values sets.String + switch operator { + case selection.In, selection.NotIn: + values, err = p.parseValues() + case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan: + values, err = p.parseExactValue() + } + if err != nil { + return nil, err + } + return NewRequirement(key, operator, values.List()) + +} + +// parseKeyAndInferOperator parse literals. +// in case of no operator '!, in, notin, ==, =, !=' are found +// the 'exists' operator is inferred +func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) { + var operator selection.Operator + tok, literal := p.consume(Values) + if tok == DoesNotExistToken { + operator = selection.DoesNotExist + tok, literal = p.consume(Values) + } + if tok != IdentifierToken { + err := fmt.Errorf("found '%s', expected: identifier", literal) + return "", "", err + } + if err := validateLabelKey(literal); err != nil { + return "", "", err + } + if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken { + if operator != selection.DoesNotExist { + operator = selection.Exists + } + } + return literal, operator, nil +} + +// parseOperator return operator and eventually matchType +// matchType can be exact +func (p *Parser) parseOperator() (op selection.Operator, err error) { + tok, lit := p.consume(KeyAndOperator) + switch tok { + // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator + case InToken: + op = selection.In + case EqualsToken: + op = selection.Equals + case DoubleEqualsToken: + op = selection.DoubleEquals + case GreaterThanToken: + op = selection.GreaterThan + case LessThanToken: + op = selection.LessThan + case NotInToken: + op = selection.NotIn + case NotEqualsToken: + op = selection.NotEquals + default: + return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) + } + return op, nil +} + +// parseValues parses the values for set based matching (x,y,z) +func (p *Parser) parseValues() (sets.String, error) { + tok, lit := p.consume(Values) + if tok != OpenParToken { + return nil, fmt.Errorf("found '%s' expected: '('", lit) + } + tok, lit = p.lookahead(Values) + switch tok { + case 
IdentifierToken, CommaToken: + s, err := p.parseIdentifiersList() // handles general cases + if err != nil { + return s, err + } + if tok, _ = p.consume(Values); tok != ClosedParToken { + return nil, fmt.Errorf("found '%s', expected: ')'", lit) + } + return s, nil + case ClosedParToken: // handles "()" + p.consume(Values) + return sets.NewString(""), nil + default: + return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit) + } +} + +// parseIdentifiersList parses a (possibly empty) list of +// of comma separated (possibly empty) identifiers +func (p *Parser) parseIdentifiersList() (sets.String, error) { + s := sets.NewString() + for { + tok, lit := p.consume(Values) + switch tok { + case IdentifierToken: + s.Insert(lit) + tok2, lit2 := p.lookahead(Values) + switch tok2 { + case CommaToken: + continue + case ClosedParToken: + return s, nil + default: + return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2) + } + case CommaToken: // handled here since we can have "(," + if s.Len() == 0 { + s.Insert("") // to handle (, + } + tok2, _ := p.lookahead(Values) + if tok2 == ClosedParToken { + s.Insert("") // to handle ,) Double "" removed by StringSet + return s, nil + } + if tok2 == CommaToken { + p.consume(Values) + s.Insert("") // to handle ,, Double "" removed by StringSet + } + default: // it can be operator + return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit) + } + } +} + +// parseExactValue parses the only value for exact match style +func (p *Parser) parseExactValue() (sets.String, error) { + s := sets.NewString() + tok, lit := p.lookahead(Values) + if tok == EndOfStringToken || tok == CommaToken { + s.Insert("") + return s, nil + } + tok, lit = p.consume(Values) + if tok == IdentifierToken { + s.Insert(lit) + return s, nil + } + return nil, fmt.Errorf("found '%s', expected: identifier", lit) +} + +// Parse takes a string representing a selector and returns a selector +// object, or an error. This parsing function differs from ParseSelector +// as they parse different selectors with different syntaxes. +// The input will cause an error if it does not follow this form: +// +// ::= | "," +// ::= [!] KEY [ | ] +// ::= "" | +// ::= | +// ::= "notin" +// ::= "in" +// ::= "(" ")" +// ::= VALUE | VALUE "," +// ::= ["="|"=="|"!="] VALUE +// +// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters. +// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters. +// Delimiter is white space: (' ', '\t') +// Example of valid syntax: +// "x in (foo,,baz),y,z notin ()" +// +// Note: +// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// (2) Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// (3) The empty string is a valid VALUE +// (4) A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// (5) A requirement with just !KEY requires that the KEY not exist. +// +func Parse(selector string) (Selector, error) { + parsedSelector, err := parse(selector) + if err == nil { + return parsedSelector, nil + } + return nil, err +} + +// parse parses the string representation of the selector and returns the internalSelector struct. +// The callers of this method can then decide how to return the internalSelector struct to their +// callers. 
This function has two callers now, one returns a Selector interface and the other +// returns a list of requirements. +func parse(selector string) (internalSelector, error) { + p := &Parser{l: &Lexer{s: selector, pos: 0}} + items, err := p.parse() + if err != nil { + return nil, err + } + sort.Sort(ByKey(items)) // sort to grant determistic parsing + return internalSelector(items), err +} + +func validateLabelKey(k string) error { + if errs := validation.IsQualifiedName(k); len(errs) != 0 { + return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; ")) + } + return nil +} + +func validateLabelValue(v string) error { + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + } + return nil +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil and empty Sets are considered equivalent to Everything(). +func SelectorFromSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + var requirements internalSelector + for label, value := range ls { + r, err := NewRequirement(label, selection.Equals, []string{value}) + if err == nil { + requirements = append(requirements, *r) + } else { + //TODO: double check errors when input comes from serialization? + return internalSelector{} + } + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return requirements +} + +// SelectorFromValidatedSet returns a Selector which will match exactly the given Set. +// A nil and empty Sets are considered equivalent to Everything(). +// It assumes that Set is already validated and doesn't do any validation. +func SelectorFromValidatedSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + var requirements internalSelector + for label, value := range ls { + requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return requirements +} + +// ParseToRequirements takes a string representing a selector and returns a list of +// requirements. This function is suitable for those callers that perform additional +// processing on selector requirements. +// See the documentation for Parse() function for more details. +// TODO: Consider exporting the internalSelector type instead. +func ParseToRequirements(selector string) ([]Requirement, error) { + return parse(selector) +} diff --git a/vendor/k8s.io/apimachinery/pkg/openapi/common.go b/vendor/k8s.io/apimachinery/pkg/openapi/common.go new file mode 100644 index 000000000..bfab64a1c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/openapi/common.go @@ -0,0 +1,161 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package openapi + +import ( + "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" + "strings" +) + +// OpenAPIDefinition describes single type. Normally these definitions are auto-generated using gen-openapi. +type OpenAPIDefinition struct { + Schema spec.Schema + Dependencies []string +} + +type ReferenceCallback func(path string) spec.Ref + +// OpenAPIDefinitions is collection of all definitions. +type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition + +// OpenAPIDefinitionGetter gets openAPI definitions for a given type. If a type implements this interface, +// the definition returned by it will be used, otherwise the auto-generated definitions will be used. See +// GetOpenAPITypeFormat for more information about trade-offs of using this interface or GetOpenAPITypeFormat method when +// possible. +type OpenAPIDefinitionGetter interface { + OpenAPIDefinition() *OpenAPIDefinition +} + +// Config is set of configuration for openAPI spec generation. +type Config struct { + // List of supported protocols such as https, http, etc. + ProtocolList []string + + // Info is general information about the API. + Info *spec.Info + + // DefaultResponse will be used if an operation does not have any responses listed. It + // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. + DefaultResponse *spec.Response + + // CommonResponses will be added as a response to all operation specs. This is a good place to add common + // responses such as authorization failed. + CommonResponses map[int]spec.Response + + // List of webservice's path prefixes to ignore + IgnorePrefixes []string + + // OpenAPIDefinitions should provide definition for all models used by routes. Failure to provide this map + // or any of the models will result in spec generation failure. + GetDefinitions GetOpenAPIDefinitions + + // GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs. + GetOperationIDAndTags func(r *restful.Route) (string, []string, error) + + // GetDefinitionName returns a friendly name for a definition base on the serving path. parameter `name` is the full name of the definition. + // It is an optional function to customize model names. + GetDefinitionName func(name string) (string, spec.Extensions) + + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec.Swagger) (*spec.Swagger, error) + + // SecurityDefinitions is list of all security definitions for OpenAPI service. If this is not nil, the user of config + // is responsible to provide DefaultSecurity and (maybe) add unauthorized response to CommonResponses. + SecurityDefinitions *spec.SecurityDefinitions + + // DefaultSecurity for all operations. This will pass as spec.SwaggerProps.Security to OpenAPI. + // For most cases, this will be list of acceptable definitions in SecurityDefinitions. + DefaultSecurity []map[string][]string +} + +// This function is a reference for converting go (or any custom type) to a simple open API type,format pair. There are +// two ways to customize spec for a type. If you add it here, a type will be converted to a simple type and the type +// comment (the comment that is added before type definition) will be lost. The spec will still have the property +// comment. The second way is to implement OpenAPIDefinitionGetter interface. 
That function can customize the spec (so +// the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple +// type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. +// Example: +// type Sample struct { +// ... +// // port of the server +// port IntOrString +// ... +// } +// // IntOrString documentation... +// type IntOrString { ... } +// +// Adding IntOrString to this function: +// "port" : { +// format: "string", +// type: "int-or-string", +// Description: "port of the server" +// } +// +// Implement OpenAPIDefinitionGetter for IntOrString: +// +// "port" : { +// $Ref: "#/definitions/IntOrString" +// Description: "port of the server" +// } +// ... +// definitions: +// { +// "IntOrString": { +// format: "string", +// type: "int-or-string", +// Description: "IntOrString documentation..." // new +// } +// } +// +func GetOpenAPITypeFormat(typeName string) (string, string) { + schemaTypeFormatMap := map[string][]string{ + "uint": {"integer", "int32"}, + "uint8": {"integer", "byte"}, + "uint16": {"integer", "int32"}, + "uint32": {"integer", "int64"}, + "uint64": {"integer", "int64"}, + "int": {"integer", "int32"}, + "int8": {"integer", "byte"}, + "int16": {"integer", "int32"}, + "int32": {"integer", "int32"}, + "int64": {"integer", "int64"}, + "byte": {"integer", "byte"}, + "float64": {"number", "double"}, + "float32": {"number", "float"}, + "bool": {"boolean", ""}, + "time.Time": {"string", "date-time"}, + "string": {"string", ""}, + "integer": {"integer", ""}, + "number": {"number", ""}, + "boolean": {"boolean", ""}, + "[]byte": {"string", "byte"}, // base64 encoded characters + "interface{}": {"object", ""}, + } + mapped, ok := schemaTypeFormatMap[typeName] + if !ok { + return "", "" + } + return mapped[0], mapped[1] +} + +func EscapeJsonPointer(p string) string { + // Escaping reference name using rfc6901 + p = strings.Replace(p, "~", "~0", -1) + p = strings.Replace(p, "/", "~1", -1) + return p +} diff --git a/vendor/k8s.io/apimachinery/pkg/openapi/doc.go b/vendor/k8s.io/apimachinery/pkg/openapi/doc.go new file mode 100644 index 000000000..5ed572cc1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/openapi/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package openapi holds shared codes and types between open API code generator and spec generator. +package openapi diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go new file mode 100644 index 000000000..d9748f066 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -0,0 +1,316 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/url" + "reflect" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// codec binds an encoder and decoder. +type codec struct { + Encoder + Decoder +} + +// NewCodec creates a Codec from an Encoder and Decoder. +func NewCodec(e Encoder, d Decoder) Codec { + return codec{e, d} +} + +// Encode is a convenience wrapper for encoding to a []byte from an Encoder +func Encode(e Encoder, obj Object) ([]byte, error) { + // TODO: reuse buffer + buf := &bytes.Buffer{} + if err := e.Encode(obj, buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// Decode is a convenience wrapper for decoding data into an Object. +func Decode(d Decoder, data []byte) (Object, error) { + obj, _, err := d.Decode(data, nil, nil) + return obj, err +} + +// DecodeInto performs a Decode into the provided object. +func DecodeInto(d Decoder, data []byte, into Object) error { + out, gvk, err := d.Decode(data, nil, into) + if err != nil { + return err + } + if out != into { + return fmt.Errorf("unable to decode %s into %v", gvk, reflect.TypeOf(into)) + } + return nil +} + +// EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests. +func EncodeOrDie(e Encoder, obj Object) string { + bytes, err := Encode(e, obj) + if err != nil { + panic(err) + } + return string(bytes) +} + +// DefaultingSerializer invokes defaulting after decoding. +type DefaultingSerializer struct { + Defaulter ObjectDefaulter + Decoder Decoder + // Encoder is optional to allow this type to be used as both a Decoder and an Encoder + Encoder +} + +// Decode performs a decode and then allows the defaulter to act on the provided object. +func (d DefaultingSerializer) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaultGVK, into) + if err != nil { + return obj, gvk, err + } + d.Defaulter.Default(obj) + return obj, gvk, nil +} + +// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or +// invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object. +func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk schema.GroupVersionKind, obj Object) (Object, error) { + if obj != nil { + kinds, _, err := t.ObjectKinds(obj) + if err != nil { + return nil, err + } + for _, kind := range kinds { + if gvk == kind { + return obj, nil + } + } + } + return c.New(gvk) +} + +// NoopEncoder converts an Decoder to a Serializer or Codec for code that expects them but only uses decoding. +type NoopEncoder struct { + Decoder +} + +var _ Serializer = NoopEncoder{} + +func (n NoopEncoder) Encode(obj Object, w io.Writer) error { + return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder)) +} + +// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding. 
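`Encode`, `Decode`, and `DecodeInto` are thin conveniences over whatever `Encoder`/`Decoder` pair they are handed, and `NewCodec` just binds such a pair. The sketch below is not part of this PR: it assumes the vendored `runtime/serializer/json` and `apis/meta/v1` packages are available, registers `metav1.Status` under a made-up `example.com/v1` group purely for illustration, and relies on the pre-1.8 `runtime.Object` interface used at this vendored revision.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	// Register metav1.Status under an illustrative group/version in a fresh scheme.
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "example.com", Version: "v1"}
	scheme.AddKnownTypes(gv, &metav1.Status{})

	// A plain JSON serializer acts as both Encoder and Decoder; NewCodec pairs them.
	s := json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false)
	codec := runtime.NewCodec(s, s)

	in := &metav1.Status{
		TypeMeta: metav1.TypeMeta{APIVersion: "example.com/v1", Kind: "Status"},
		Message:  "hello",
	}

	data, err := runtime.Encode(codec, in)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded: %s\n", data)

	out, err := runtime.Decode(codec, data)
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded message:", out.(*metav1.Status).Message)

	// DecodeInto decodes directly into an existing object.
	into := &metav1.Status{}
	if err := runtime.DecodeInto(codec, data, into); err != nil {
		panic(err)
	}
	fmt.Println("into message:", into.Message)
}
```

In real callers the codec usually comes from a `serializer.CodecFactory` rather than being assembled by hand; the manual pairing here only keeps the sketch self-contained.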
+type NoopDecoder struct { + Encoder +} + +var _ Serializer = NoopDecoder{} + +func (n NoopDecoder) Decode(data []byte, gvk *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + return nil, nil, fmt.Errorf("decoding is not allowed for this codec: %v", reflect.TypeOf(n.Encoder)) +} + +// NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back. +func NewParameterCodec(scheme *Scheme) ParameterCodec { + return ¶meterCodec{ + typer: scheme, + convertor: scheme, + creator: scheme, + } +} + +// parameterCodec implements conversion to and from query parameters and objects. +type parameterCodec struct { + typer ObjectTyper + convertor ObjectConvertor + creator ObjectCreater +} + +var _ ParameterCodec = ¶meterCodec{} + +// DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then +// converts that object to into (if necessary). Returns an error if the operation cannot be completed. +func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error { + if len(parameters) == 0 { + return nil + } + targetGVKs, _, err := c.typer.ObjectKinds(into) + if err != nil { + return err + } + for i := range targetGVKs { + if targetGVKs[i].GroupVersion() == from { + return c.convertor.Convert(¶meters, into, nil) + } + } + input, err := c.creator.New(from.WithKind(targetGVKs[0].Kind)) + if err != nil { + return err + } + if err := c.convertor.Convert(¶meters, input, nil); err != nil { + return err + } + return c.convertor.Convert(input, into, nil) +} + +// EncodeParameters converts the provided object into the to version, then converts that object to url.Values. +// Returns an error if conversion is not possible. +func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) { + gvks, _, err := c.typer.ObjectKinds(obj) + if err != nil { + return nil, err + } + gvk := gvks[0] + if to != gvk.GroupVersion() { + out, err := c.convertor.ConvertToVersion(obj, to) + if err != nil { + return nil, err + } + obj = out + } + return queryparams.Convert(obj) +} + +type base64Serializer struct { + Encoder + Decoder +} + +func NewBase64Serializer(e Encoder, d Decoder) Serializer { + return &base64Serializer{e, d} +} + +func (s base64Serializer) Encode(obj Object, stream io.Writer) error { + e := base64.NewEncoder(base64.StdEncoding, stream) + err := s.Encoder.Encode(obj, e) + e.Close() + return err +} + +func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) + n, err := base64.StdEncoding.Decode(out, data) + if err != nil { + return nil, nil, err + } + return s.Decoder.Decode(out[:n], defaults, into) +} + +// SerializerInfoForMediaType returns the first info in types that has a matching media type (which cannot +// include media-type parameters), or the first info with an empty media type, or false if no type matches. +func SerializerInfoForMediaType(types []SerializerInfo, mediaType string) (SerializerInfo, bool) { + for _, info := range types { + if info.MediaType == mediaType { + return info, true + } + } + for _, info := range types { + if len(info.MediaType) == 0 { + return info, true + } + } + return SerializerInfo{}, false +} + +var ( + // InternalGroupVersioner will always prefer the internal version for a given group version kind. 
+ InternalGroupVersioner GroupVersioner = internalGroupVersioner{} + // DisabledGroupVersioner will reject all kinds passed to it. + DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{} +) + +type internalGroupVersioner struct{} + +// KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version. +func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, kind := range kinds { + if kind.Version == APIVersionInternal { + return kind, true + } + } + for _, kind := range kinds { + return schema.GroupVersionKind{Group: kind.Group, Version: APIVersionInternal, Kind: kind.Kind}, true + } + return schema.GroupVersionKind{}, false +} + +type disabledGroupVersioner struct{} + +// KindForGroupVersionKinds returns false for any input. +func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + return schema.GroupVersionKind{}, false +} + +// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind. +type GroupVersioners []GroupVersioner + +// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occured. +func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, gv := range gvs { + target, ok := gv.KindForGroupVersionKinds(kinds) + if !ok { + continue + } + return target, true + } + return schema.GroupVersionKind{}, false +} + +// Assert that schema.GroupVersion and GroupVersions implement GroupVersioner +var _ GroupVersioner = schema.GroupVersion{} +var _ GroupVersioner = schema.GroupVersions{} +var _ GroupVersioner = multiGroupVersioner{} + +type multiGroupVersioner struct { + target schema.GroupVersion + acceptedGroupKinds []schema.GroupKind +} + +// NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds. +// Kind may be empty in the provided group kind, in which case any kind will match. +func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { + if len(groupKinds) == 0 || (len(groupKinds) == 1 && groupKinds[0].Group == gv.Group) { + return gv + } + return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds} +} + +// KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will +// use the originating kind where possible. +func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, src := range kinds { + for _, kind := range v.acceptedGroupKinds { + if kind.Group != src.Group { + continue + } + if len(kind.Kind) > 0 && kind.Kind != src.Kind { + continue + } + return v.target.WithKind(src.Kind), true + } + } + return schema.GroupVersionKind{}, false +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go new file mode 100644 index 000000000..510444a4d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go @@ -0,0 +1,48 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// CheckCodec makes sure that the codec can encode objects like internalType, +// decode all of the external types listed, and also decode them into the given +// object. (Will modify internalObject.) (Assumes JSON serialization.) +// TODO: verify that the correct external version is chosen on encode... +func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error { + if _, err := Encode(c, internalType); err != nil { + return fmt.Errorf("Internal type not encodable: %v", err) + } + for _, et := range externalTypes { + exBytes := []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v"}`, et.Kind, et.GroupVersion().String())) + obj, err := Decode(c, exBytes) + if err != nil { + return fmt.Errorf("external type %s not interpretable: %v", et, err) + } + if reflect.TypeOf(obj) != reflect.TypeOf(internalType) { + return fmt.Errorf("decode of external type %s produced: %#v", et, obj) + } + if err = DecodeInto(c, exBytes, internalType); err != nil { + return fmt.Errorf("external type %s not convertible to internal type: %v", et, err) + } + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go new file mode 100644 index 000000000..8eedffc9c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -0,0 +1,98 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Defines conversions between generic types and structs to map query strings +// to struct objects. +package runtime + +import ( + "reflect" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/conversion" +) + +// JSONKeyMapper uses the struct tags on a conversion to determine the key value for +// the other side. Use when mapping from a map[string]* to a struct or vice versa. +func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) { + if s := destTag.Get("json"); len(s) > 0 { + return strings.SplitN(s, ",", 2)[0], key + } + if s := sourceTag.Get("json"); len(s) > 0 { + return key, strings.SplitN(s, ",", 2)[0] + } + return key, key +} + +// DefaultStringConversions are helpers for converting []string and string to real values. 
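`DefaultStringConversions` and the `Convert_Slice_string_To_*` helpers declared just below are what `NewParameterCodec` ultimately relies on to turn `url.Values` entries (always `[]string`) into scalar struct fields. A small sketch of their behavior, using non-empty inputs since the helpers take the first element of the slice:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Query parameters arrive as []string; these helpers consume the first element.
	var enabled bool
	in := []string{"0"}
	if err := runtime.Convert_Slice_string_To_bool(&in, &enabled, nil); err != nil {
		panic(err)
	}
	fmt.Println(enabled) // false ("false" and "0" are the only spellings that resolve to false)

	var port int
	in = []string{"8080"}
	if err := runtime.Convert_Slice_string_To_int(&in, &port, nil); err != nil {
		panic(err)
	}
	fmt.Println(port) // 8080
}
```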
+var DefaultStringConversions = []interface{}{ + Convert_Slice_string_To_string, + Convert_Slice_string_To_int, + Convert_Slice_string_To_bool, + Convert_Slice_string_To_int64, +} + +func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error { + if len(*input) == 0 { + *out = "" + } + *out = (*input)[0] + return nil +} + +func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error { + if len(*input) == 0 { + *out = 0 + } + str := (*input)[0] + i, err := strconv.Atoi(str) + if err != nil { + return err + } + *out = i + return nil +} + +// Conver_Slice_string_To_bool will convert a string parameter to boolean. +// Only the absence of a value, a value of "false", or a value of "0" resolve to false. +// Any other value (including empty string) resolves to true. +func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error { + if len(*input) == 0 { + *out = false + return nil + } + switch strings.ToLower((*input)[0]) { + case "false", "0": + *out = false + default: + *out = true + } + return nil +} + +func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error { + if len(*input) == 0 { + *out = 0 + } + str := (*input)[0] + i, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *out = i + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go new file mode 100644 index 000000000..06b45df66 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go @@ -0,0 +1,45 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package runtime includes helper functions for working with API objects +// that follow the kubernetes API object conventions, which are: +// +// 0. Your API objects have a common metadata struct member, TypeMeta. +// 1. Your code refers to an internal set of API objects. +// 2. In a separate package, you have an external set of API objects. +// 3. The external set is considered to be versioned, and no breaking +// changes are ever made to it (fields may be added but not changed +// or removed). +// 4. As your api evolves, you'll make an additional versioned package +// with every major change. +// 5. Versioned packages have conversion functions which convert to +// and from the internal version. +// 6. You'll continue to support older versions according to your +// deprecation policy, and you can easily provide a program/library +// to update old versions into new versions because of 5. +// 7. All of your serializations and deserializations are handled in a +// centralized place. +// +// Package runtime provides a conversion helper to make 5 easy, and the +// Encode/Decode/DecodeInto trio to accomplish 7. You can also register +// additional "codecs" which use a version of your choice. It's +// recommended that you register your types with runtime in your +// package's init function. 
+// +// As a bonus, a few common types useful from all api objects and versions +// are provided in types.go. + +package runtime // import "k8s.io/apimachinery/pkg/runtime" diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go new file mode 100644 index 000000000..e8825a787 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go @@ -0,0 +1,136 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "errors" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type encodable struct { + E Encoder `json:"-"` + obj Object + versions []schema.GroupVersion +} + +func (e encodable) GetObjectKind() schema.ObjectKind { return e.obj.GetObjectKind() } + +// NewEncodable creates an object that will be encoded with the provided codec on demand. +// Provided as a convenience for test cases dealing with internal objects. +func NewEncodable(e Encoder, obj Object, versions ...schema.GroupVersion) Object { + if _, ok := obj.(*Unknown); ok { + return obj + } + return encodable{e, obj, versions} +} + +func (re encodable) UnmarshalJSON(in []byte) error { + return errors.New("runtime.encodable cannot be unmarshalled from JSON") +} + +// Marshal may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (re encodable) MarshalJSON() ([]byte, error) { + return Encode(re.E, re.obj) +} + +// NewEncodableList creates an object that will be encoded with the provided codec on demand. +// Provided as a convenience for test cases dealing with internal objects. +func NewEncodableList(e Encoder, objects []Object, versions ...schema.GroupVersion) []Object { + out := make([]Object, len(objects)) + for i := range objects { + if _, ok := objects[i].(*Unknown); ok { + out[i] = objects[i] + continue + } + out[i] = NewEncodable(e, objects[i], versions...) + } + return out +} + +func (re *Unknown) UnmarshalJSON(in []byte) error { + if re == nil { + return errors.New("runtime.Unknown: UnmarshalJSON on nil pointer") + } + re.TypeMeta = TypeMeta{} + re.Raw = append(re.Raw[0:0], in...) + re.ContentEncoding = "" + re.ContentType = ContentTypeJSON + return nil +} + +// Marshal may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (re Unknown) MarshalJSON() ([]byte, error) { + // If ContentType is unset, we assume this is JSON. 
+ if re.ContentType != "" && re.ContentType != ContentTypeJSON { + return nil, errors.New("runtime.Unknown: MarshalJSON on non-json data") + } + if re.Raw == nil { + return []byte("null"), nil + } + return re.Raw, nil +} + +func Convert_runtime_Object_To_runtime_RawExtension(in *Object, out *RawExtension, s conversion.Scope) error { + if in == nil { + out.Raw = []byte("null") + return nil + } + obj := *in + if unk, ok := obj.(*Unknown); ok { + if unk.Raw != nil { + out.Raw = unk.Raw + return nil + } + obj = out.Object + } + if obj == nil { + out.Raw = nil + return nil + } + out.Object = obj + return nil +} + +func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Object, s conversion.Scope) error { + if in.Object != nil { + *out = in.Object + return nil + } + data := in.Raw + if len(data) == 0 || (len(data) == 4 && string(data) == "null") { + *out = nil + return nil + } + *out = &Unknown{ + Raw: data, + // TODO: Set ContentEncoding and ContentType appropriately. + // Currently we set ContentTypeJSON to make tests passing. + ContentType: ContentTypeJSON, + } + return nil +} + +func DefaultEmbeddedConversions() []interface{} { + return []interface{}{ + Convert_runtime_Object_To_runtime_RawExtension, + Convert_runtime_RawExtension_To_runtime_Object, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go new file mode 100644 index 000000000..21a355707 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -0,0 +1,113 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type notRegisteredErr struct { + gvk schema.GroupVersionKind + target GroupVersioner + t reflect.Type +} + +func NewNotRegisteredErrForKind(gvk schema.GroupVersionKind) error { + return ¬RegisteredErr{gvk: gvk} +} + +func NewNotRegisteredErrForType(t reflect.Type) error { + return ¬RegisteredErr{t: t} +} + +func NewNotRegisteredErrForTarget(t reflect.Type, target GroupVersioner) error { + return ¬RegisteredErr{t: t, target: target} +} + +func (k *notRegisteredErr) Error() string { + if k.t != nil && k.target != nil { + return fmt.Sprintf("%v is not suitable for converting to %q", k.t, k.target) + } + if k.t != nil { + return fmt.Sprintf("no kind is registered for the type %v", k.t) + } + if len(k.gvk.Kind) == 0 { + return fmt.Sprintf("no version %q has been registered", k.gvk.GroupVersion()) + } + if k.gvk.Version == APIVersionInternal { + return fmt.Sprintf("no kind %q is registered for the internal version of group %q", k.gvk.Kind, k.gvk.Group) + } + + return fmt.Sprintf("no kind %q is registered for version %q", k.gvk.Kind, k.gvk.GroupVersion()) +} + +// IsNotRegisteredError returns true if the error indicates the provided +// object or input data is not registered. 
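+//
+// Typical (illustrative) use when falling back for unrecognized payloads
+// (codec and data are placeholders):
+//
+//	if _, err := Decode(codec, data); IsNotRegisteredError(err) {
+//		// treat the payload as runtime.Unknown instead of failing
+//	}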
+func IsNotRegisteredError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*notRegisteredErr) + return ok +} + +type missingKindErr struct { + data string +} + +func NewMissingKindErr(data string) error { + return &missingKindErr{data} +} + +func (k *missingKindErr) Error() string { + return fmt.Sprintf("Object 'Kind' is missing in '%s'", k.data) +} + +// IsMissingKind returns true if the error indicates that the provided object +// is missing a 'Kind' field. +func IsMissingKind(err error) bool { + if err == nil { + return false + } + _, ok := err.(*missingKindErr) + return ok +} + +type missingVersionErr struct { + data string +} + +// IsMissingVersion returns true if the error indicates that the provided object +// is missing a 'Version' field. +func NewMissingVersionErr(data string) error { + return &missingVersionErr{data} +} + +func (k *missingVersionErr) Error() string { + return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) +} + +func IsMissingVersion(err error) bool { + if err == nil { + return false + } + _, ok := err.(*missingVersionErr) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go new file mode 100644 index 000000000..4d23ee9ee --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -0,0 +1,48 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "encoding/json" + "errors" +) + +func (re *RawExtension) UnmarshalJSON(in []byte) error { + if re == nil { + return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") + } + re.Raw = append(re.Raw[0:0], in...) + return nil +} + +// Marshal may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (re RawExtension) MarshalJSON() ([]byte, error) { + if re.Raw == nil { + // TODO: this is to support legacy behavior of JSONPrinter and YAMLPrinter, which + // expect to call json.Marshal on arbitrary versioned objects (even those not in + // the scheme). pkg/kubectl/resource#AsVersionedObjects and its interaction with + // kubectl get on objects not in the scheme needs to be updated to ensure that the + // objects that are not part of the scheme are correctly put into the right form. + if re.Object != nil { + return json.Marshal(re.Object) + } + return []byte("null"), nil + } + // TODO: Check whether ContentType is actually JSON before returning it. + return re.Raw, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go new file mode 100644 index 000000000..bce8336a8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -0,0 +1,773 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto + + It has these top-level messages: + RawExtension + TypeMeta + Unknown +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") + proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") +} +func (m *RawExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Raw != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i += copy(dAtA[i:], m.Raw) + } + return i, nil +} + +func (m *TypeMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil +} + +func (m *Unknown) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { + var i int + _ 
= i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.Raw != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i += copy(dAtA[i:], m.Raw) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) + i += copy(dAtA[i:], m.ContentEncoding) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i += copy(dAtA[i:], m.ContentType) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RawExtension) Size() (n int) { + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypeMeta) Size() (n int) { + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Unknown) Size() (n int) { + var l int + _ = l + l = m.TypeMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ContentEncoding) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContentType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RawExtension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RawExtension{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `}`, + }, "") + return s +} +func (this *TypeMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypeMeta{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *Unknown) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Unknown{`, + `TypeMeta:` + strings.Replace(strings.Replace(this.TypeMeta.String(), "TypeMeta", "TypeMeta", 1), `&`, ``, 1) + `,`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `ContentEncoding:` + fmt.Sprintf("%v", this.ContentEncoding) + `,`, + `ContentType:` + fmt.Sprintf("%v", this.ContentType) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RawExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err 
+ } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Unknown) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Unknown: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TypeMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentEncoding = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during 
unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 395 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0xe3, 0xb5, 0x52, 0x87, 0x5b, 0x69, 0xc8, 0x1c, 0x08, 0x3b, 0x38, 0x53, 0x4f, 0xec, + 0x30, 0x5b, 0x1a, 0x42, 0xe2, 0xba, 0x4c, 0x93, 0x40, 0x08, 0x09, 0x59, 0xfc, 0x91, 0x38, 0xe1, + 0x26, 0x26, 0xb3, 0x42, 0x5f, 0x47, 0x8e, 0x43, 0xd8, 0x8d, 0x8f, 0xc0, 0xc7, 0xea, 0x71, 0xc7, + 0x9e, 0x2a, 0x1a, 0x3e, 0x04, 0x57, 0x54, 0xd7, 0x2d, 0xa5, 0x08, 0xed, 0x16, 0xbf, 0xcf, 0xf3, + 0x7b, 0xde, 0xe7, 0x0d, 0x7e, 0x5e, 0x3e, 0xab, 0x99, 0x36, 0xbc, 0x6c, 0x26, 0xca, 0x82, 0x72, + 0xaa, 0xe6, 0x5f, 0x14, 0xe4, 0xc6, 0xf2, 0x20, 0xc8, 0x4a, 0x4f, 0x65, 0x76, 0xad, 0x41, 0xd9, + 0x1b, 0x5e, 0x95, 0x05, 0xb7, 0x0d, 0x38, 0x3d, 0x55, 0xbc, 0x50, 0xa0, 0xac, 0x74, 0x2a, 0x67, + 0x95, 0x35, 0xce, 0x90, 0x64, 0x0d, 0xb0, 0x5d, 0x80, 0x55, 0x65, 0xc1, 0x02, 0x70, 0x7c, 0x56, + 0x68, 0x77, 0xdd, 0x4c, 0x58, 0x66, 0xa6, 0xbc, 0x30, 0x85, 0xe1, 0x9e, 0x9b, 0x34, 0x9f, 0xfc, + 0xcb, 0x3f, 0xfc, 0xd7, 0x3a, 0xef, 0xf8, 0xc9, 0xff, 0x0a, 0x34, 0x4e, 0x7f, 0xe6, 0x1a, 0x5c, + 0xed, 0xec, 0x7e, 0x89, 0xf1, 0x29, 0x1e, 0x09, 0xd9, 0x5e, 0x7d, 0x75, 0x0a, 0x6a, 0x6d, 0x80, + 0x3c, 0xc2, 0x3d, 0x2b, 0xdb, 0x18, 0x9d, 0xa0, 0xc7, 0xa3, 0x74, 0xd0, 0x2d, 0x92, 0x9e, 0x90, + 0xad, 0x58, 0xcd, 0xc6, 0x1f, 0xf1, 0xe1, 0x9b, 0x9b, 0x4a, 0xbd, 0x52, 0x4e, 0x92, 0x73, 0x8c, + 0x65, 0xa5, 0xdf, 0x29, 0xbb, 0x82, 0xbc, 0xfb, 0x5e, 0x4a, 0x66, 0x8b, 0x24, 0xea, 0x16, 0x09, + 0xbe, 0x78, 0xfd, 0x22, 0x28, 0x62, 0xc7, 0x45, 0x4e, 0x70, 0xbf, 0xd4, 0x90, 0xc7, 0x07, 0xde, + 0x3d, 0x0a, 0xee, 0xfe, 0x4b, 0x0d, 0xb9, 0xf0, 0xca, 0xf8, 0x17, 0xc2, 0x83, 0xb7, 0x50, 0x82, + 0x69, 0x81, 0xbc, 0xc7, 0x87, 0x2e, 0x6c, 0xf3, 0xf9, 0xc3, 0xf3, 0x53, 0x76, 0xc7, 0x0f, 0x63, + 0x9b, 0x7a, 0xe9, 0xfd, 0x10, 0xbe, 0x2d, 0x2c, 0xb6, 0x61, 0x9b, 0x0b, 0x0f, 0xfe, 0xbd, 0x90, + 0x5c, 0xe0, 0xa3, 0xcc, 0x80, 0x53, 0xe0, 0xae, 0x20, 0x33, 0xb9, 0x86, 0x22, 0xee, 0xf9, 0xb2, + 0x0f, 0x43, 0xde, 0xd1, 0xe5, 0xdf, 0xb2, 0xd8, 0xf7, 0x93, 0xa7, 0x78, 0x18, 0x46, 0xab, 0xd5, + 0x71, 0xdf, 0xe3, 0x0f, 0x02, 0x3e, 0xbc, 0xfc, 0x23, 0x89, 0x5d, 0x5f, 0x7a, 0x36, 0x5b, 0xd2, + 0xe8, 0x76, 0x49, 0xa3, 0xf9, 0x92, 0x46, 0xdf, 0x3a, 0x8a, 0x66, 0x1d, 0x45, 0xb7, 0x1d, 0x45, + 0xf3, 0x8e, 0xa2, 0x1f, 0x1d, 0x45, 0xdf, 0x7f, 0xd2, 0xe8, 0xc3, 0x20, 0x1c, 0xfa, 0x3b, 0x00, + 0x00, 0xff, 0xff, 0x3f, 0x1e, 0x24, 0x09, 0x85, 0x02, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto new file mode 100644 index 000000000..57fc84078 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.runtime; + +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "runtime"; + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. +// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message RawExtension { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + optional bytes raw = 1; +} + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message TypeMeta { + // +optional + optional string apiVersion = 1; + + // +optional + optional string kind = 2; +} + +// Unknown allows api objects with unknown types to be passed-through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field based accessors and settors for +// metadata and field mutatation. +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message Unknown { + optional TypeMeta typeMeta = 1; + + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. 
+ optional bytes raw = 2; + + // ContentEncoding is encoding used to encode 'Raw' data. + // Unspecified means no encoding. + optional string contentEncoding = 3; + + // ContentType is serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. + optional string contentType = 4; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go new file mode 100644 index 000000000..a6c1a8d34 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -0,0 +1,212 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "io" + "reflect" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/errors" +) + +// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path. +type unsafeObjectConvertor struct { + *Scheme +} + +var _ ObjectConvertor = unsafeObjectConvertor{} + +// ConvertToVersion converts in to the provided outVersion without copying the input first, which +// is only safe if the output object is not mutated or reused. +func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion GroupVersioner) (Object, error) { + return c.Scheme.UnsafeConvertToVersion(in, outVersion) +} + +// UnsafeObjectConvertor performs object conversion without copying the object structure, +// for use when the converted object will not be reused or mutated. Primarily for use within +// versioned codecs, which use the external object for serialization but do not return it. +func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { + return unsafeObjectConvertor{scheme} +} + +// SetField puts the value of src, into fieldName, which must be a member of v. +// The value of src must be assignable to the field. +func SetField(src interface{}, v reflect.Value, fieldName string) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + srcValue := reflect.ValueOf(src) + if srcValue.Type().AssignableTo(field.Type()) { + field.Set(srcValue) + return nil + } + if srcValue.Type().ConvertibleTo(field.Type()) { + field.Set(srcValue.Convert(field.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type()) +} + +// Field puts the value of fieldName, which must be a member of v, into dest, +// which must be a variable to which this field's value can be assigned. 
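+//
+// For example (v and the "Name" field are placeholders for a caller's struct):
+//
+//	var name string
+//	if err := Field(v, "Name", &name); err != nil {
+//		// the struct has no assignable "Name" field
+//	}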
+func Field(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + destValue, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + if field.Type().AssignableTo(destValue.Type()) { + destValue.Set(field) + return nil + } + if field.Type().ConvertibleTo(destValue.Type()) { + destValue.Set(field.Convert(destValue.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type()) +} + +// fieldPtr puts the address of fieldName, which must be a member of v, +// into dest, which must be an address of a variable to which this field's +// address can be assigned. +func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + v, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + field = field.Addr() + if field.Type().AssignableTo(v.Type()) { + v.Set(field) + return nil + } + if field.Type().ConvertibleTo(v.Type()) { + v.Set(field.Convert(v.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type()) +} + +// EncodeList ensures that each object in an array is converted to a Unknown{} in serialized form. +// TODO: accept a content type. +func EncodeList(e Encoder, objects []Object) error { + var errs []error + for i := range objects { + data, err := Encode(e, objects[i]) + if err != nil { + errs = append(errs, err) + continue + } + // TODO: Set ContentEncoding and ContentType. + objects[i] = &Unknown{Raw: data} + } + return errors.NewAggregate(errs) +} + +func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) { + for _, decoder := range decoders { + // TODO: Decode based on ContentType. + obj, err := Decode(decoder, obj.Raw) + if err != nil { + if IsNotRegisteredError(err) { + continue + } + return nil, err + } + return obj, nil + } + // could not decode, so leave the object as Unknown, but give the decoders the + // chance to set Unknown.TypeMeta if it is available. + for _, decoder := range decoders { + if err := DecodeInto(decoder, obj.Raw, obj); err == nil { + return obj, nil + } + } + return obj, nil +} + +// DecodeList alters the list in place, attempting to decode any objects found in +// the list that have the Unknown type. Any errors that occur are returned +// after the entire list is processed. Decoders are tried in order. +func DecodeList(objects []Object, decoders ...Decoder) []error { + errs := []error(nil) + for i, obj := range objects { + switch t := obj.(type) { + case *Unknown: + decoded, err := decodeListItem(t, decoders) + if err != nil { + errs = append(errs, err) + break + } + objects[i] = decoded + } + } + return errs +} + +// MultiObjectTyper returns the types of objects across multiple schemes in order. 
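+//
+// Illustrative use, combining two typers (scheme1 and scheme2 are assumed to
+// implement ObjectTyper):
+//
+//	typer := MultiObjectTyper{scheme1, scheme2}
+//	gvks, unversioned, err := typer.ObjectKinds(obj)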
+type MultiObjectTyper []ObjectTyper + +var _ ObjectTyper = MultiObjectTyper{} + +func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { + for _, t := range m { + gvks, unversionedType, err = t.ObjectKinds(obj) + if err == nil { + return + } + } + return +} + +func (m MultiObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { + for _, t := range m { + if t.Recognizes(gvk) { + return true + } + } + return false +} + +// SetZeroValue would set the object of objPtr to zero value of its type. +func SetZeroValue(objPtr Object) error { + v, err := conversion.EnforcePtr(objPtr) + if err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil +} + +// DefaultFramer is valid for any stream that can read objects serially without +// any separation in the stream. +var DefaultFramer = defaultFramer{} + +type defaultFramer struct{} + +func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } +func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go new file mode 100644 index 000000000..fcb18ba11 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -0,0 +1,251 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "io" + "net/url" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // APIVersionInternal may be used if you are registering a type that should not + // be considered stable or serialized - it is a convention only and has no + // special behavior in this package. + APIVersionInternal = "__internal" +) + +// GroupVersioner refines a set of possible conversion targets into a single option. +type GroupVersioner interface { + // KindForGroupVersionKinds returns a desired target group version kind for the given input, or returns ok false if no + // target is known. In general, if the return target is not in the input list, the caller is expected to invoke + // Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type. + // Sophisticated implementations may use additional information about the input kinds to pick a destination kind. + KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool) +} + +// Encoders write objects to a serialized form +type Encoder interface { + // Encode writes an object to a stream. Implementations may return errors if the versions are + // incompatible, or if no conversion is defined. + Encode(obj Object, w io.Writer) error +} + +// Decoders attempt to load an object from data. +type Decoder interface { + // Decode attempts to deserialize the provided data using either the innate typing of the scheme or the + // default kind, group, and version provided. 
It returns a decoded object as well as the kind, group, and + // version from the serialized data, or an error. If into is non-nil, it will be used as the target type + // and implementations may choose to use it rather than reallocating an object. However, the object is not + // guaranteed to be populated. The returned object is not guaranteed to match into. If defaults are + // provided, they are applied to the data by default. If no defaults or partial defaults are provided, the + // type of the into may be used to guide conversion decisions. + Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) +} + +// Serializer is the core interface for transforming objects into a serialized format and back. +// Implementations may choose to perform conversion of the object, but no assumptions should be made. +type Serializer interface { + Encoder + Decoder +} + +// Codec is a Serializer that deals with the details of versioning objects. It offers the same +// interface as Serializer, so this is a marker to consumers that care about the version of the objects +// they receive. +type Codec Serializer + +// ParameterCodec defines methods for serializing and deserializing API objects to url.Values and +// performing any necessary conversion. Unlike the normal Codec, query parameters are not self describing +// and the desired version must be specified. +type ParameterCodec interface { + // DecodeParameters takes the given url.Values in the specified group version and decodes them + // into the provided object, or returns an error. + DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error + // EncodeParameters encodes the provided object as query parameters or returns an error. + EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) +} + +// Framer is a factory for creating readers and writers that obey a particular framing pattern. +type Framer interface { + NewFrameReader(r io.ReadCloser) io.ReadCloser + NewFrameWriter(w io.Writer) io.Writer +} + +// SerializerInfo contains information about a specific serialization format +type SerializerInfo struct { + // MediaType is the value that represents this serializer over the wire. + MediaType string + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. + EncodesAsText bool + // Serializer is the individual object serializer for this media type. + Serializer Serializer + // PrettySerializer, if set, can serialize this object in a form biased towards + // readability. + PrettySerializer Serializer + // StreamSerializer, if set, describes the streaming serialization format + // for this media type. + StreamSerializer *StreamSerializerInfo +} + +// StreamSerializerInfo contains information about a specific stream serialization format +type StreamSerializerInfo struct { + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. + EncodesAsText bool + // Serializer is the top level object serializer for this type when streaming + Serializer + // Framer is the factory for retrieving streams that separate objects on the wire + Framer +} + +// NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers +// for multiple supported media types. This would commonly be accepted by a server component +// that performs HTTP content negotiation to accept multiple formats. 
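+//
+// A sketch of how a caller might pick the JSON form (ns and gv are
+// placeholders for a NegotiatedSerializer and a target GroupVersioner):
+//
+//	var enc Encoder
+//	for _, info := range ns.SupportedMediaTypes() {
+//		if info.MediaType == "application/json" {
+//			enc = ns.EncoderForVersion(info.Serializer, gv)
+//		}
+//	}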
+type NegotiatedSerializer interface { + // SupportedMediaTypes is the media types supported for reading and writing single objects. + SupportedMediaTypes() []SerializerInfo + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder + // DecoderForVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder +} + +// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers +// that can read and write data at rest. This would commonly be used by client tools that must +// read files, or server side storage interfaces that persist restful objects. +type StorageSerializer interface { + // SupportedMediaTypes are the media types supported for reading and writing objects. + SupportedMediaTypes() []SerializerInfo + + // UniversalDeserializer returns a Serializer that can read objects in multiple supported formats + // by introspecting the data at rest. + UniversalDeserializer() Decoder + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder + // DecoderForVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder +} + +// NestedObjectEncoder is an optional interface that objects may implement to be given +// an opportunity to encode any nested Objects / RawExtensions during serialization. +type NestedObjectEncoder interface { + EncodeNestedObjects(e Encoder) error +} + +// NestedObjectDecoder is an optional interface that objects may implement to be given +// an opportunity to decode any nested Objects / RawExtensions during serialization. +type NestedObjectDecoder interface { + DecodeNestedObjects(d Decoder) error +} + +/////////////////////////////////////////////////////////////////////////////// +// Non-codec interfaces + +type ObjectDefaulter interface { + // Default takes an object (must be a pointer) and applies any default values. + // Defaulters may not error. + Default(in Object) +} + +type ObjectVersioner interface { + ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) +} + +// ObjectConvertor converts an object to a different version. +type ObjectConvertor interface { + // Convert attempts to convert one object into another, or returns an error. This method does + // not guarantee the in object is not mutated. The context argument will be passed to + // all nested conversions. + Convert(in, out, context interface{}) error + // ConvertToVersion takes the provided object and converts it the provided version. This + // method does not guarantee that the in object is not mutated. This method is similar to + // Convert() but handles specific details of choosing the correct output version. + ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) + ConvertFieldLabel(version, kind, label, value string) (string, string, error) +} + +// ObjectTyper contains methods for extracting the APIVersion and Kind +// of objects. 
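+//
+// An illustrative guard before decoding (typer is a placeholder for an
+// ObjectTyper implementation):
+//
+//	if !typer.Recognizes(gvk) {
+//		// fall back to a generic or unstructured decode path
+//	}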
+type ObjectTyper interface { + // ObjectKinds returns the all possible group,version,kind of the provided object, true if + // the object is unversioned, or an error if the object is not recognized + // (IsNotRegisteredError will return true). + ObjectKinds(Object) ([]schema.GroupVersionKind, bool, error) + // Recognizes returns true if the scheme is able to handle the provided version and kind, + // or more precisely that the provided version is a possible conversion or decoding + // target. + Recognizes(gvk schema.GroupVersionKind) bool +} + +// ObjectCreater contains methods for instantiating an object by kind and version. +type ObjectCreater interface { + New(kind schema.GroupVersionKind) (out Object, err error) +} + +// ObjectCopier duplicates an object. +type ObjectCopier interface { + // Copy returns an exact copy of the provided Object, or an error if the + // copy could not be completed. + Copy(Object) (Object, error) +} + +// ResourceVersioner provides methods for setting and retrieving +// the resource version from an API object. +type ResourceVersioner interface { + SetResourceVersion(obj Object, version string) error + ResourceVersion(obj Object) (string, error) +} + +// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object. +type SelfLinker interface { + SetSelfLink(obj Object, selfLink string) error + SelfLink(obj Object) (string, error) + + // Knowing Name is sometimes necessary to use a SelfLinker. + Name(obj Object) (string, error) + // Knowing Namespace is sometimes necessary to use a SelfLinker + Namespace(obj Object) (string, error) +} + +// All API types registered with Scheme must support the Object interface. Since objects in a scheme are +// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows +// serializers to set the kind, version, and group the object is represented as. An Object may choose +// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized. +type Object interface { + GetObjectKind() schema.ObjectKind +} + +// Unstructured objects store values as map[string]interface{}, with only values that can be serialized +// to JSON allowed. +type Unstructured interface { + // IsUnstructuredObject is a marker interface to allow objects that can be serialized but not introspected + // to bypass conversion. + IsUnstructuredObject() + // IsList returns true if this type is a list or matches the list convention - has an array called "items". + IsList() bool + // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be + // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to + // and from JSON. + UnstructuredContent() map[string]interface{} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go new file mode 100644 index 000000000..2ec6db820 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *Unknown) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind +// interface if no objects are provided, or the ObjectKind interface of the object in the +// highest array position. +func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind { + last := obj.Last() + if last == nil { + return schema.EmptyObjectKind + } + return last.GetObjectKind() +} + +// First returns the leftmost object in the VersionedObjects array, which is usually the +// object as serialized on the wire. +func (obj *VersionedObjects) First() Object { + if len(obj.Objects) == 0 { + return nil + } + return obj.Objects[0] +} + +// Last is the rightmost object in the VersionedObjects array, which is the object after +// all transformations have been applied. This is the same object that would be returned +// by Decode in a normal invocation (without VersionedObjects in the into argument). +func (obj *VersionedObjects) Last() Object { + if len(obj.Objects) == 0 { + return nil + } + return obj.Objects[len(obj.Objects)-1] +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go new file mode 100644 index 000000000..e2cc12166 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +// DO NOT EDIT! + +/* + Package schema is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto + + It has these top-level messages: +*/ +package schema + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xce, 0xaf, 0x4e, 0x04, 0x31, + 0x10, 0xc7, 0xf1, 0xd6, 0x20, 0x90, 0xc8, 0x13, 0x23, 0x51, 0xd0, 0x11, 0x18, 0x34, 0x2f, 0x80, + 0xc7, 0x75, 0xf7, 0x86, 0x6e, 0x53, 0xfa, 0x27, 0xed, 0x94, 0x04, 0xc7, 0x23, 0xf0, 0x58, 0x27, + 0x4f, 0xae, 0x64, 0xcb, 0x8b, 0x90, 0xb4, 0x2b, 0x08, 0xc9, 0xb9, 0xfe, 0xd2, 0x7c, 0x26, 0xdf, + 0xeb, 0x67, 0xf7, 0x58, 0x94, 0x8d, 0xe8, 0xea, 0x44, 0x39, 0x10, 0x53, 0xc1, 0x77, 0x0a, 0xc7, + 0x98, 0x71, 0xff, 0xd0, 0xc9, 0x7a, 0x3d, 0x2f, 0x36, 0x50, 0xfe, 0xc0, 0xe4, 0x0c, 0xe6, 0x1a, + 0xd8, 0x7a, 0xc2, 0x32, 0x2f, 0xe4, 0x35, 0x1a, 0x0a, 0x94, 0x35, 0xd3, 0x51, 0xa5, 0x1c, 0x39, + 0xde, 0xdc, 0x0e, 0xa7, 0xfe, 0x3a, 0x95, 0x9c, 0x51, 0xbb, 0x53, 0xc3, 0x1d, 0xee, 0x8d, 0xe5, + 0xa5, 0x4e, 0x6a, 0x8e, 0x1e, 0x4d, 0x34, 0x11, 0x3b, 0x9f, 0xea, 0x6b, 0x5f, 0x7d, 0xf4, 0xd7, + 0x38, 0x7b, 0x78, 0xb8, 0x94, 0x53, 0xd9, 0xbe, 0xa1, 0x0d, 0x5c, 0x38, 0xff, 0x6f, 0x79, 0xba, + 0x3b, 0x6d, 0x20, 0xce, 0x1b, 0x88, 0x75, 0x03, 0xf1, 0xd9, 0x40, 0x9e, 0x1a, 0xc8, 0x73, 0x03, + 0xb9, 0x36, 0x90, 0xdf, 0x0d, 0xe4, 0xd7, 0x0f, 0x88, 0x97, 0xab, 0x51, 0xf4, 0x1b, 0x00, 0x00, + 0xff, 0xff, 0xfd, 0x59, 0x57, 0x93, 0x0b, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto new file mode 100644 index 000000000..ebc1a263d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.runtime.schema; + +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "schema"; + diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go new file mode 100644 index 000000000..1a9bba106 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -0,0 +1,277 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "fmt" + "strings" +) + +// ParseResourceArg takes the common style of string which may be either `resource.group.com` or `resource.version.group.com` +// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended +// but with a knowledge of all GroupVersions, calling code can take a very good guess. If there are only two segments, then +// `*GroupVersionResource` is nil. +// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource` +func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { + var gvr *GroupVersionResource + if strings.Count(arg, ".") >= 2 { + s := strings.SplitN(arg, ".", 3) + gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]} + } + + return gvr, ParseGroupResource(arg) +} + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupResource struct { + Group string + Resource string +} + +func (gr GroupResource) WithVersion(version string) GroupVersionResource { + return GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource} +} + +func (gr GroupResource) Empty() bool { + return len(gr.Group) == 0 && len(gr.Resource) == 0 +} + +func (gr *GroupResource) String() string { + if len(gr.Group) == 0 { + return gr.Resource + } + return gr.Resource + "." + gr.Group +} + +// ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed +// for each field. +func ParseGroupResource(gr string) GroupResource { + if i := strings.Index(gr, "."); i == -1 { + return GroupResource{Resource: gr} + } else { + return GroupResource{Group: gr[i+1:], Resource: gr[:i]} + } +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling +type GroupVersionResource struct { + Group string + Version string + Resource string +} + +func (gvr GroupVersionResource) Empty() bool { + return len(gvr.Group) == 0 && len(gvr.Version) == 0 && len(gvr.Resource) == 0 +} + +func (gvr GroupVersionResource) GroupResource() GroupResource { + return GroupResource{Group: gvr.Group, Resource: gvr.Resource} +} + +func (gvr GroupVersionResource) GroupVersion() GroupVersion { + return GroupVersion{Group: gvr.Group, Version: gvr.Version} +} + +func (gvr *GroupVersionResource) String() string { + return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupKind struct { + Group string + Kind string +} + +func (gk GroupKind) Empty() bool { + return len(gk.Group) == 0 && len(gk.Kind) == 0 +} + +func (gk GroupKind) WithVersion(version string) GroupVersionKind { + return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} +} + +func (gk *GroupKind) String() string { + if len(gk.Group) == 0 { + return gk.Kind + } + return gk.Kind + "." + gk.Group +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. 
It doesn't use a GroupVersion to avoid custom marshalling +type GroupVersionKind struct { + Group string + Version string + Kind string +} + +// Empty returns true if group, version, and kind are empty +func (gvk GroupVersionKind) Empty() bool { + return len(gvk.Group) == 0 && len(gvk.Version) == 0 && len(gvk.Kind) == 0 +} + +func (gvk GroupVersionKind) GroupKind() GroupKind { + return GroupKind{Group: gvk.Group, Kind: gvk.Kind} +} + +func (gvk GroupVersionKind) GroupVersion() GroupVersion { + return GroupVersion{Group: gvk.Group, Version: gvk.Version} +} + +func (gvk GroupVersionKind) String() string { + return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +type GroupVersion struct { + Group string + Version string +} + +// Empty returns true if group and version are empty +func (gv GroupVersion) Empty() bool { + return len(gv.Group) == 0 && len(gv.Version) == 0 +} + +// String puts "group" and "version" into a single "group/version" string. For the legacy v1 +// it returns "v1". +func (gv GroupVersion) String() string { + // special case the internal apiVersion for the legacy kube types + if gv.Empty() { + return "" + } + + // special case of "v1" for backward compatibility + if len(gv.Group) == 0 && gv.Version == "v1" { + return gv.Version + } + if len(gv.Group) > 0 { + return gv.Group + "/" + gv.Version + } + return gv.Version +} + +// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false +// if none of the options match the group. It prefers a match to group and version over just group. +// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. +// TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) +// in fewer places. +func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { + for _, gvk := range kinds { + if gvk.Group == gv.Group && gvk.Version == gv.Version { + return gvk, true + } + } + for _, gvk := range kinds { + if gvk.Group == gv.Group { + return gv.WithKind(gvk.Kind), true + } + } + return GroupVersionKind{}, false +} + +// ParseGroupVersion turns "group/version" string into a GroupVersion struct. It reports error +// if it cannot parse the string. +func ParseGroupVersion(gv string) (GroupVersion, error) { + // this can be the internal version for the legacy kube types + // TODO once we've cleared the last uses as strings, this special case should be removed. + if (len(gv) == 0) || (gv == "/") { + return GroupVersion{}, nil + } + + switch strings.Count(gv, "/") { + case 0: + return GroupVersion{"", gv}, nil + case 1: + i := strings.Index(gv, "/") + return GroupVersion{gv[:i], gv[i+1:]}, nil + default: + return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv) + } +} + +// WithKind creates a GroupVersionKind based on the method receiver's GroupVersion and the passed Kind. +func (gv GroupVersion) WithKind(kind string) GroupVersionKind { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} +} + +// WithResource creates a GroupVersionResource based on the method receiver's GroupVersion and the passed Resource. 
+func (gv GroupVersion) WithResource(resource string) GroupVersionResource { + return GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: resource} +} + +// GroupVersions can be used to represent a set of desired group versions. +// TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. +// TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) +// in fewer places. +type GroupVersions []GroupVersion + +// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false +// if none of the options match the group. +func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { + var targets []GroupVersionKind + for _, gv := range gvs { + target, ok := gv.KindForGroupVersionKinds(kinds) + if !ok { + continue + } + targets = append(targets, target) + } + if len(targets) == 1 { + return targets[0], true + } + if len(targets) > 1 { + return bestMatch(kinds, targets), true + } + return GroupVersionKind{}, false +} + +// bestMatch tries to pick best matching GroupVersionKind and falls back to the first +// found if no exact match exists. +func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersionKind { + for _, gvk := range targets { + for _, k := range kinds { + if k == gvk { + return k + } + } + } + return targets[0] +} + +// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that +// do not use TypeMeta. +func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk == nil { + return "", "" + } + return gvk.GroupVersion().String(), gvk.Kind +} + +// FromAPIVersionAndKind returns a GVK representing the provided fields for types that +// do not use TypeMeta. This method exists to support test types and legacy serializations +// that have a distinct group and kind. +// TODO: further reduce usage of this method. +func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind { + if gv, err := ParseGroupVersion(apiVersion); err == nil { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} + } + return GroupVersionKind{Kind: kind} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go new file mode 100644 index 000000000..b57066845 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +// All objects that are serialized from a Scheme encode their type information. This interface is used +// by serialization to set type information from the Scheme onto the serialized version of an object. +// For objects that cannot be serialized or have unique requirements, this interface may be a no-op. +type ObjectKind interface { + // SetGroupVersionKind sets or clears the intended serialized kind of an object. 
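For reference, a small sketch of how the parsing helpers in the vendored group_version.go behave. Everything here is grounded in the functions above; only the literal strings are invented.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// With two or more dots, ParseResourceArg returns both readings of the
	// argument; with fewer, the *GroupVersionResource is nil.
	gvr, gr := schema.ParseResourceArg("deployments.v1.apps")
	fmt.Println(gvr.Group, gvr.Version, gvr.Resource) // apps v1 deployments
	fmt.Println(gr.String())                          // deployments.v1.apps

	// "group/version" strings round-trip through ParseGroupVersion,
	// with bare "v1" special-cased for the legacy core group.
	gv, err := schema.ParseGroupVersion("apps/v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(gv.String())               // apps/v1
	fmt.Println(gv.WithKind("Deployment")) // apps/v1, Kind=Deployment

	legacy, _ := schema.ParseGroupVersion("v1")
	fmt.Println(legacy.String()) // v1
}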
Passing kind nil + // should clear the current setting. + SetGroupVersionKind(kind GroupVersionKind) + // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does + // not expose or provide these fields. + GroupVersionKind() GroupVersionKind +} + +// EmptyObjectKind implements the ObjectKind interface as a noop +var EmptyObjectKind = emptyObjectKind{} + +type emptyObjectKind struct{} + +// SetGroupVersionKind implements the ObjectKind interface +func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {} + +// GroupVersionKind implements the ObjectKind interface +func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go new file mode 100644 index 000000000..6c9475fa0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -0,0 +1,577 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "net/url" + "reflect" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Scheme defines methods for serializing and deserializing API objects, a type +// registry for converting group, version, and kind information to and from Go +// schemas, and mappings between Go schemas of different versions. A scheme is the +// foundation for a versioned API and versioned configuration over time. +// +// In a Scheme, a Type is a particular Go struct, a Version is a point-in-time +// identifier for a particular representation of that Type (typically backwards +// compatible), a Kind is the unique name for that Type within the Version, and a +// Group identifies a set of Versions, Kinds, and Types that evolve over time. An +// Unversioned Type is one that is not yet formally bound to a type and is promised +// to be backwards compatible (effectively a "v1" of a Type that does not expect +// to break in the future). +// +// Schemes are not expected to change at runtime and are only threadsafe after +// registration is complete. +type Scheme struct { + // versionMap allows one to figure out the go type of an object with + // the given version and name. + gvkToType map[schema.GroupVersionKind]reflect.Type + + // typeToGroupVersion allows one to find metadata for a given go object. + // The reflect.Type we index by should *not* be a pointer. + typeToGVK map[reflect.Type][]schema.GroupVersionKind + + // unversionedTypes are transformed without conversion in ConvertToVersion. + unversionedTypes map[reflect.Type]schema.GroupVersionKind + + // unversionedKinds are the names of kinds that can be created in the context of any group + // or version + // TODO: resolve the status of unversioned types. + unversionedKinds map[string]reflect.Type + + // Map from version and resource to the corresponding func to convert + // resource field labels in that version to internal version. 
+ fieldLabelConversionFuncs map[string]map[string]FieldLabelConversionFunc + + // defaulterFuncs is an array of interfaces to be called with an object to provide defaulting + // the provided object must be a pointer. + defaulterFuncs map[reflect.Type]func(interface{}) + + // converter stores all registered conversion functions. It also has + // default coverting behavior. + converter *conversion.Converter + + // cloner stores all registered copy functions. It also has default + // deep copy behavior. + cloner *conversion.Cloner +} + +// Function to convert a field selector to internal representation. +type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error) + +// NewScheme creates a new Scheme. This scheme is pluggable by default. +func NewScheme() *Scheme { + s := &Scheme{ + gvkToType: map[schema.GroupVersionKind]reflect.Type{}, + typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, + unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, + unversionedKinds: map[string]reflect.Type{}, + cloner: conversion.NewCloner(), + fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{}, + defaulterFuncs: map[reflect.Type]func(interface{}){}, + } + s.converter = conversion.NewConverter(s.nameFunc) + + s.AddConversionFuncs(DefaultEmbeddedConversions()...) + + // Enable map[string][]string conversions by default + if err := s.AddConversionFuncs(DefaultStringConversions...); err != nil { + panic(err) + } + if err := s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil { + panic(err) + } + if err := s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil { + panic(err) + } + return s +} + +// nameFunc returns the name of the type that we wish to use to determine when two types attempt +// a conversion. Defaults to the go name of the type if the type is not registered. +func (s *Scheme) nameFunc(t reflect.Type) string { + // find the preferred names for this type + gvks, ok := s.typeToGVK[t] + if !ok { + return t.Name() + } + + for _, gvk := range gvks { + internalGV := gvk.GroupVersion() + internalGV.Version = "__internal" // this is hacky and maybe should be passed in + internalGVK := internalGV.WithKind(gvk.Kind) + + if internalType, exists := s.gvkToType[internalGVK]; exists { + return s.typeToGVK[internalType][0].Kind + } + } + + return gvks[0].Kind +} + +// fromScope gets the input version, desired output version, and desired Scheme +// from a conversion.Scope. +func (s *Scheme) fromScope(scope conversion.Scope) *Scheme { + return s +} + +// Converter allows access to the converter for the scheme +func (s *Scheme) Converter() *conversion.Converter { + return s.converter +} + +// AddUnversionedTypes registers the provided types as "unversioned", which means that they follow special rules. +// Whenever an object of this type is serialized, it is serialized with the provided group version and is not +// converted. Thus unversioned objects are expected to remain backwards compatible forever, as if they were in an +// API group and version that would never be updated. +// +// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into +// every version with particular schemas. Resolve this method at that point. 
+func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { + s.AddKnownTypes(version, types...) + for _, obj := range types { + t := reflect.TypeOf(obj).Elem() + gvk := version.WithKind(t.Name()) + s.unversionedTypes[t] = gvk + if old, ok := s.unversionedKinds[gvk.Kind]; ok && t != old { + panic(fmt.Sprintf("%v.%v has already been registered as unversioned kind %q - kind name must be unique", old.PkgPath(), old.Name(), gvk)) + } + s.unversionedKinds[gvk.Kind] = t + } +} + +// AddKnownTypes registers all types passed in 'types' as being members of version 'version'. +// All objects passed to types should be pointers to structs. The name that go reports for +// the struct becomes the "kind" field when encoding. Version may not be empty - use the +// APIVersionInternal constant if you have a type that does not have a formal version. +func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { + for _, obj := range types { + t := reflect.TypeOf(obj) + if t.Kind() != reflect.Ptr { + panic("All types must be pointers to structs.") + } + t = t.Elem() + s.AddKnownTypeWithName(gv.WithKind(t.Name()), obj) + } +} + +// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what this type should +// be encoded as. Useful for testing when you don't want to make multiple packages to define +// your structs. Version may not be empty - use the APIVersionInternal constant if you have a +// type that does not have a formal version. +func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) { + t := reflect.TypeOf(obj) + if len(gvk.Version) == 0 { + panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t)) + } + if t.Kind() != reflect.Ptr { + panic("All types must be pointers to structs.") + } + t = t.Elem() + if t.Kind() != reflect.Struct { + panic("All types must be pointers to structs.") + } + + if oldT, found := s.gvkToType[gvk]; found && oldT != t { + panic(fmt.Sprintf("Double registration of different types for %v: old=%v.%v, new=%v.%v", gvk, oldT.PkgPath(), oldT.Name(), t.PkgPath(), t.Name())) + } + + s.gvkToType[gvk] = t + + for _, existingGvk := range s.typeToGVK[t] { + if existingGvk == gvk { + return + } + } + s.typeToGVK[t] = append(s.typeToGVK[t], gvk) +} + +// KnownTypes returns the types known for the given version. +func (s *Scheme) KnownTypes(gv schema.GroupVersion) map[string]reflect.Type { + types := make(map[string]reflect.Type) + for gvk, t := range s.gvkToType { + if gv != gvk.GroupVersion() { + continue + } + + types[gvk.Kind] = t + } + return types +} + +// AllKnownTypes returns the all known types. +func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type { + return s.gvkToType +} + +// ObjectKind returns the group,version,kind of the go object and true if this object +// is considered unversioned, or an error if it's not a pointer or is unregistered. +func (s *Scheme) ObjectKind(obj Object) (schema.GroupVersionKind, bool, error) { + gvks, unversionedType, err := s.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, false, err + } + return gvks[0], unversionedType, nil +} + +// ObjectKinds returns all possible group,version,kind of the go object, true if the +// object is considered unversioned, or an error if it's not a pointer or is unregistered. 
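A minimal sketch of registering a type with a Scheme and asking it for kind information, using a hypothetical Example type and example.dev group. It assumes runtime.Object at this vendored revision only requires GetObjectKind, which is what the Unknown helper earlier in this change suggests.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Example is a hypothetical API type; embedding runtime.TypeMeta provides the
// apiVersion/kind fields that the serializers read and write.
type Example struct {
	runtime.TypeMeta `json:",inline"`
	Value            string `json:"value,omitempty"`
}

func (e *Example) GetObjectKind() schema.ObjectKind { return &e.TypeMeta }

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "example.dev", Version: "v1"}

	// The Go type name becomes the kind.
	scheme.AddKnownTypes(gv, &Example{})

	gvks, unversioned, err := scheme.ObjectKinds(&Example{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0], unversioned) // example.dev/v1, Kind=Example false

	// New instantiates a registered kind by its GVK.
	obj, err := scheme.New(gv.WithKind("Example"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", obj) // *main.Example
}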
+func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, false, err + } + t := v.Type() + + gvks, ok := s.typeToGVK[t] + if !ok { + return nil, false, NewNotRegisteredErrForType(t) + } + _, unversionedType := s.unversionedTypes[t] + + return gvks, unversionedType, nil +} + +// Recognizes returns true if the scheme is able to handle the provided group,version,kind +// of an object. +func (s *Scheme) Recognizes(gvk schema.GroupVersionKind) bool { + _, exists := s.gvkToType[gvk] + return exists +} + +func (s *Scheme) IsUnversioned(obj Object) (bool, bool) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return false, false + } + t := v.Type() + + if _, ok := s.typeToGVK[t]; !ok { + return false, false + } + _, ok := s.unversionedTypes[t] + return ok, true +} + +// New returns a new API object of the given version and name, or an error if it hasn't +// been registered. The version and kind fields must be specified. +func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) { + if t, exists := s.gvkToType[kind]; exists { + return reflect.New(t).Interface().(Object), nil + } + + if t, exists := s.unversionedKinds[kind.Kind]; exists { + return reflect.New(t).Interface().(Object), nil + } + return nil, NewNotRegisteredErrForKind(kind) +} + +// AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern +// (for two conversion types) to the converter. These functions are checked first during +// a normal conversion, but are otherwise not called. Use AddConversionFuncs when registering +// typed conversions. +func (s *Scheme) AddGenericConversionFunc(fn conversion.GenericConversionFunc) { + s.converter.AddGenericConversionFunc(fn) +} + +// Log sets a logger on the scheme. For test purposes only +func (s *Scheme) Log(l conversion.DebugLogger) { + s.converter.Debug = l +} + +// AddIgnoredConversionType identifies a pair of types that should be skipped by +// conversion (because the data inside them is explicitly dropped during +// conversion). +func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error { + return s.converter.RegisterIgnoredConversion(from, to) +} + +// AddConversionFuncs adds functions to the list of conversion functions. The given +// functions should know how to convert between two of your API objects, or their +// sub-objects. We deduce how to call these functions from the types of their two +// parameters; see the comment for Converter.Register. +// +// Note that, if you need to copy sub-objects that didn't change, you can use the +// conversion.Scope object that will be passed to your conversion function. +// Additionally, all conversions started by Scheme will set the SrcVersion and +// DestVersion fields on the Meta object. Example: +// +// s.AddConversionFuncs( +// func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error { +// // You can depend on Meta() being non-nil, and this being set to +// // the source version, e.g., "" +// s.Meta().SrcVersion +// // You can depend on this being set to the destination version, +// // e.g., "v1". +// s.Meta().DestVersion +// // Call scope.Convert to copy sub-fields. +// s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0) +// return nil +// }, +// ) +// +// (For more detail about conversion functions, see Converter.Register's comment.) 
+// +// Also note that the default behavior, if you don't add a conversion function, is to +// sanely copy fields that have the same names and same type names. It's OK if the +// destination type has extra fields, but it must not remove any. So you only need to +// add conversion functions for things with changed/removed fields. +func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error { + for _, f := range conversionFuncs { + if err := s.converter.RegisterConversionFunc(f); err != nil { + return err + } + } + return nil +} + +// Similar to AddConversionFuncs, but registers conversion functions that were +// automatically generated. +func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) error { + for _, f := range conversionFuncs { + if err := s.converter.RegisterGeneratedConversionFunc(f); err != nil { + return err + } + } + return nil +} + +// AddDeepCopyFuncs adds a function to the list of deep-copy functions. +// For the expected format of deep-copy function, see the comment for +// Copier.RegisterDeepCopyFunction. +func (s *Scheme) AddDeepCopyFuncs(deepCopyFuncs ...interface{}) error { + for _, f := range deepCopyFuncs { + if err := s.cloner.RegisterDeepCopyFunc(f); err != nil { + return err + } + } + return nil +} + +// Similar to AddDeepCopyFuncs, but registers deep-copy functions that were +// automatically generated. +func (s *Scheme) AddGeneratedDeepCopyFuncs(deepCopyFuncs ...conversion.GeneratedDeepCopyFunc) error { + for _, fn := range deepCopyFuncs { + if err := s.cloner.RegisterGeneratedDeepCopyFunc(fn); err != nil { + return err + } + } + return nil +} + +// AddFieldLabelConversionFunc adds a conversion function to convert field selectors +// of the given kind from the given version to internal version representation. +func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error { + if s.fieldLabelConversionFuncs[version] == nil { + s.fieldLabelConversionFuncs[version] = map[string]FieldLabelConversionFunc{} + } + + s.fieldLabelConversionFuncs[version][kind] = conversionFunc + return nil +} + +// AddStructFieldConversion allows you to specify a mechanical copy for a moved +// or renamed struct field without writing an entire conversion function. See +// the comment in conversion.Converter.SetStructFieldCopy for parameter details. +// Call as many times as needed, even on the same fields. +func (s *Scheme) AddStructFieldConversion(srcFieldType interface{}, srcFieldName string, destFieldType interface{}, destFieldName string) error { + return s.converter.SetStructFieldCopy(srcFieldType, srcFieldName, destFieldType, destFieldName) +} + +// RegisterInputDefaults sets the provided field mapping function and field matching +// as the defaults for the provided input type. The fn may be nil, in which case no +// mapping will happen by default. Use this method to register a mechanism for handling +// a specific input type in conversion, such as a map[string]string to structs. +func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error { + return s.converter.RegisterInputDefaults(in, fn, defaultFlags) +} + +// AddTypeDefaultingFuncs registers a function that is passed a pointer to an +// object and can default fields on the object. These functions will be invoked +// when Default() is called. The function will never be called unless the +// defaulted object matches srcType. 
If this function is invoked twice with the +// same srcType, the fn passed to the later call will be used instead. +func (s *Scheme) AddTypeDefaultingFunc(srcType Object, fn func(interface{})) { + s.defaulterFuncs[reflect.TypeOf(srcType)] = fn +} + +// Default sets defaults on the provided Object. +func (s *Scheme) Default(src Object) { + if fn, ok := s.defaulterFuncs[reflect.TypeOf(src)]; ok { + fn(src) + } +} + +// Copy does a deep copy of an API object. +func (s *Scheme) Copy(src Object) (Object, error) { + dst, err := s.DeepCopy(src) + if err != nil { + return nil, err + } + return dst.(Object), nil +} + +// Performs a deep copy of the given object. +func (s *Scheme) DeepCopy(src interface{}) (interface{}, error) { + return s.cloner.DeepCopy(src) +} + +// Convert will attempt to convert in into out. Both must be pointers. For easy +// testing of conversion functions. Returns an error if the conversion isn't +// possible. You can call this with types that haven't been registered (for example, +// a to test conversion of types that are nested within registered types). The +// context interface is passed to the convertor. +// TODO: identify whether context should be hidden, or behind a formal context/scope +// interface +func (s *Scheme) Convert(in, out interface{}, context interface{}) error { + flags, meta := s.generateConvertMeta(in) + meta.Context = context + if flags == 0 { + flags = conversion.AllowDifferentFieldTypeNames + } + return s.converter.Convert(in, out, flags, meta) +} + +// Converts the given field label and value for an kind field selector from +// versioned representation to an unversioned one. +func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + if s.fieldLabelConversionFuncs[version] == nil { + return "", "", fmt.Errorf("No field label conversion function found for version: %s", version) + } + conversionFunc, ok := s.fieldLabelConversionFuncs[version][kind] + if !ok { + return "", "", fmt.Errorf("No field label conversion function found for version %s and kind %s", version, kind) + } + return conversionFunc(label, value) +} + +// ConvertToVersion attempts to convert an input object to its matching Kind in another +// version within this scheme. Will return an error if the provided version does not +// contain the inKind (or a mapping by name defined with AddKnownTypeWithName). Will also +// return an error if the conversion does not result in a valid Object being +// returned. Passes target down to the conversion methods as the Context on the scope. +func (s *Scheme) ConvertToVersion(in Object, target GroupVersioner) (Object, error) { + return s.convertToVersion(true, in, target) +} + +// UnsafeConvertToVersion will convert in to the provided target if such a conversion is possible, +// but does not guarantee the output object does not share fields with the input object. It attempts to be as +// efficient as possible when doing conversion. +func (s *Scheme) UnsafeConvertToVersion(in Object, target GroupVersioner) (Object, error) { + return s.convertToVersion(false, in, target) +} + +// convertToVersion handles conversion with an optional copy. +func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (Object, error) { + // determine the incoming kinds with as few allocations as possible. 
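The defaulting hooks above are easy to exercise in isolation; a short sketch with a hypothetical Widget type (again assuming runtime.Object only requires GetObjectKind at this revision):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Widget is a hypothetical type used only to exercise the defaulting hooks.
type Widget struct {
	runtime.TypeMeta
	Replicas int
}

func (w *Widget) GetObjectKind() schema.ObjectKind { return &w.TypeMeta }

func main() {
	scheme := runtime.NewScheme()

	// The defaulter is keyed on the concrete type *Widget; Default only calls it
	// for objects of exactly that type.
	scheme.AddTypeDefaultingFunc(&Widget{}, func(obj interface{}) {
		if w := obj.(*Widget); w.Replicas == 0 {
			w.Replicas = 1
		}
	})

	w := &Widget{}
	scheme.Default(w)
	fmt.Println(w.Replicas) // 1
}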
+ t := reflect.TypeOf(in) + if t.Kind() != reflect.Ptr { + return nil, fmt.Errorf("only pointer types may be converted: %v", t) + } + t = t.Elem() + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + } + kinds, ok := s.typeToGVK[t] + if !ok || len(kinds) == 0 { + return nil, NewNotRegisteredErrForType(t) + } + + gvk, ok := target.KindForGroupVersionKinds(kinds) + if !ok { + // try to see if this type is listed as unversioned (for legacy support) + // TODO: when we move to server API versions, we should completely remove the unversioned concept + if unversionedKind, ok := s.unversionedTypes[t]; ok { + if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { + return copyAndSetTargetKind(copy, s, in, gvk) + } + return copyAndSetTargetKind(copy, s, in, unversionedKind) + } + + return nil, NewNotRegisteredErrForTarget(t, target) + } + + // target wants to use the existing type, set kind and return (no conversion necessary) + for _, kind := range kinds { + if gvk == kind { + return copyAndSetTargetKind(copy, s, in, gvk) + } + } + + // type is unversioned, no conversion necessary + if unversionedKind, ok := s.unversionedTypes[t]; ok { + if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { + return copyAndSetTargetKind(copy, s, in, gvk) + } + return copyAndSetTargetKind(copy, s, in, unversionedKind) + } + + out, err := s.New(gvk) + if err != nil { + return nil, err + } + + if copy { + copied, err := s.Copy(in) + if err != nil { + return nil, err + } + in = copied + } + + flags, meta := s.generateConvertMeta(in) + meta.Context = target + if err := s.converter.Convert(in, out, flags, meta); err != nil { + return nil, err + } + + setTargetKind(out, gvk) + return out, nil +} + +// generateConvertMeta constructs the meta value we pass to Convert. +func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { + return s.converter.DefaultMeta(reflect.TypeOf(in)) +} + +// copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful. +func copyAndSetTargetKind(copy bool, copier ObjectCopier, obj Object, kind schema.GroupVersionKind) (Object, error) { + if copy { + copied, err := copier.Copy(obj) + if err != nil { + return nil, err + } + obj = copied + } + setTargetKind(obj, kind) + return obj, nil +} + +// setTargetKind sets the kind on an object, taking into account whether the target kind is the internal version. +func setTargetKind(obj Object, kind schema.GroupVersionKind) { + if kind.Version == APIVersionInternal { + // internal is a special case + // TODO: look at removing the need to special case this + obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) + return + } + obj.GetObjectKind().SetGroupVersionKind(kind) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go new file mode 100644 index 000000000..944db4818 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +// SchemeBuilder collects functions that add things to a scheme. It's to allow +// code to compile without explicitly referencing generated types. You should +// declare one in each package that will have generated deep copy or conversion +// functions. +type SchemeBuilder []func(*Scheme) error + +// AddToScheme applies all the stored functions to the scheme. A non-nil error +// indicates that one function failed and the attempt was abandoned. +func (sb *SchemeBuilder) AddToScheme(s *Scheme) error { + for _, f := range *sb { + if err := f(s); err != nil { + return err + } + } + return nil +} + +// Register adds a scheme setup function to the list. +func (sb *SchemeBuilder) Register(funcs ...func(*Scheme) error) { + for _, f := range funcs { + *sb = append(*sb, f) + } +} + +// NewSchemeBuilder calls Register for you. +func NewSchemeBuilder(funcs ...func(*Scheme) error) SchemeBuilder { + var sb SchemeBuilder + sb.Register(funcs...) + return sb +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go new file mode 100644 index 000000000..65f451124 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -0,0 +1,237 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
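The SchemeBuilder vendored above exists so API packages can register themselves lazily. A sketch of the usual registration pattern, with a hypothetical example.dev group and Example type; AddToScheme is the conventional name callers expect, not something mandated by the vendored code.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Example is a hypothetical API type.
type Example struct {
	runtime.TypeMeta `json:",inline"`
	Value            string `json:"value,omitempty"`
}

func (e *Example) GetObjectKind() schema.ObjectKind { return &e.TypeMeta }

// SchemeGroupVersion is the hypothetical group/version this sketch registers.
var SchemeGroupVersion = schema.GroupVersion{Group: "example.dev", Version: "v1"}

var (
	// SchemeBuilder collects this package's registration functions.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme is the function callers invoke against their own Scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

func addKnownTypes(s *runtime.Scheme) error {
	s.AddKnownTypes(SchemeGroupVersion, &Example{})
	return nil
}

func main() {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(SchemeGroupVersion.WithKind("Example"))) // true
}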
+*/ + +package serializer + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" +) + +// serializerExtensions are for serializers that are conditionally compiled in +var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){} + +type serializerType struct { + AcceptContentTypes []string + ContentType string + FileExtensions []string + // EncodesAsText should be true if this content type can be represented safely in UTF-8 + EncodesAsText bool + + Serializer runtime.Serializer + PrettySerializer runtime.Serializer + + AcceptStreamContentTypes []string + StreamContentType string + + Framer runtime.Framer + StreamSerializer runtime.Serializer +} + +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType { + jsonSerializer := json.NewSerializer(mf, scheme, scheme, false) + jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true) + yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme) + + serializers := []serializerType{ + { + AcceptContentTypes: []string{"application/json"}, + ContentType: "application/json", + FileExtensions: []string{"json"}, + EncodesAsText: true, + Serializer: jsonSerializer, + PrettySerializer: jsonPrettySerializer, + + Framer: json.Framer, + StreamSerializer: jsonSerializer, + }, + { + AcceptContentTypes: []string{"application/yaml"}, + ContentType: "application/yaml", + FileExtensions: []string{"yaml"}, + EncodesAsText: true, + Serializer: yamlSerializer, + }, + } + + for _, fn := range serializerExtensions { + if serializer, ok := fn(scheme); ok { + serializers = append(serializers, serializer) + } + } + return serializers +} + +// CodecFactory provides methods for retrieving codecs and serializers for specific +// versions and content types. +type CodecFactory struct { + scheme *runtime.Scheme + serializers []serializerType + universal runtime.Decoder + accepts []runtime.SerializerInfo + + legacySerializer runtime.Serializer +} + +// NewCodecFactory provides methods for retrieving serializers for the supported wire formats +// and conversion wrappers to define preferred internal and external versions. In the future, +// as the internal version is used less, callers may instead use a defaulting serializer and +// only convert objects which are shared internally (Status, common API machinery). +// TODO: allow other codecs to be compiled in? +// TODO: accept a scheme interface +func NewCodecFactory(scheme *runtime.Scheme) CodecFactory { + serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory) + return newCodecFactory(scheme, serializers) +} + +// newCodecFactory is a helper for testing that allows a different metafactory to be specified. 
+func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory { + decoders := make([]runtime.Decoder, 0, len(serializers)) + var accepts []runtime.SerializerInfo + alreadyAccepted := make(map[string]struct{}) + + var legacySerializer runtime.Serializer + for _, d := range serializers { + decoders = append(decoders, d.Serializer) + for _, mediaType := range d.AcceptContentTypes { + if _, ok := alreadyAccepted[mediaType]; ok { + continue + } + alreadyAccepted[mediaType] = struct{}{} + info := runtime.SerializerInfo{ + MediaType: d.ContentType, + EncodesAsText: d.EncodesAsText, + Serializer: d.Serializer, + PrettySerializer: d.PrettySerializer, + } + if d.StreamSerializer != nil { + info.StreamSerializer = &runtime.StreamSerializerInfo{ + Serializer: d.StreamSerializer, + EncodesAsText: d.EncodesAsText, + Framer: d.Framer, + } + } + accepts = append(accepts, info) + if mediaType == runtime.ContentTypeJSON { + legacySerializer = d.Serializer + } + } + } + if legacySerializer == nil { + legacySerializer = serializers[0].Serializer + } + + return CodecFactory{ + scheme: scheme, + serializers: serializers, + universal: recognizer.NewDecoder(decoders...), + + accepts: accepts, + + legacySerializer: legacySerializer, + } +} + +// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for. +func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { + return f.accepts +} + +// LegacyCodec encodes output to a given API versions, and decodes output into the internal form from +// any recognized source. The returned codec will always encode output to JSON. If a type is not +// found in the list of versions an error will be returned. +// +// This method is deprecated - clients and servers should negotiate a serializer by mime-type and +// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). +// +// TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. +// All other callers will be forced to request a Codec directly. +func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec { + return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner) +} + +// UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies +// runtime.Object. It does not perform conversion. It does not perform defaulting. +func (f CodecFactory) UniversalDeserializer() runtime.Decoder { + return f.universal +} + +// UniversalDecoder returns a runtime.Decoder capable of decoding all known API objects in all known formats. Used +// by clients that do not need to encode objects but want to deserialize API objects stored on disk. Only decodes +// objects in groups registered with the scheme. The GroupVersions passed may be used to select alternate +// versions of objects to return - by default, runtime.APIVersionInternal is used. If any versions are specified, +// unrecognized groups will be returned in the version they are encoded as (no conversion). This decoder performs +// defaulting. 
+// +// TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form +// TODO: only accept a group versioner +func (f CodecFactory) UniversalDecoder(versions ...schema.GroupVersion) runtime.Decoder { + var versioner runtime.GroupVersioner + if len(versions) == 0 { + versioner = runtime.InternalGroupVersioner + } else { + versioner = schema.GroupVersions(versions) + } + return f.CodecForVersions(nil, f.universal, nil, versioner) +} + +// CodecForVersions creates a codec with the provided serializer. If an object is decoded and its group is not in the list, +// it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not +// converted. If encode or decode are nil, no conversion is performed. +func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode runtime.GroupVersioner, decode runtime.GroupVersioner) runtime.Codec { + // TODO: these are for backcompat, remove them in the future + if encode == nil { + encode = runtime.DisabledGroupVersioner + } + if decode == nil { + decode = runtime.InternalGroupVersioner + } + return versioning.NewDefaultingCodecForScheme(f.scheme, encoder, decoder, encode, decode) +} + +// DecoderToVersion returns a decoder that targets the provided group version. +func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { + return f.CodecForVersions(nil, decoder, nil, gv) +} + +// EncoderForVersion returns an encoder that targets the provided group version. +func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + return f.CodecForVersions(encoder, nil, gv, nil) +} + +// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion. +type DirectCodecFactory struct { + CodecFactory +} + +// EncoderForVersion returns an encoder that does not do conversion. +func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { + return versioning.DirectEncoder{ + Version: version, + Encoder: serializer, + ObjectTyper: f.CodecFactory.scheme, + } +} + +// DecoderToVersion returns an decoder that does not do conversion. gv is ignored. +func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return versioning.DirectDecoder{ + Decoder: serializer, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go new file mode 100644 index 000000000..28bb91b53 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -0,0 +1,245 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
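Tying the codec factory together, a sketch of a JSON encode/decode round trip through LegacyCodec and UniversalDeserializer. The Example type and example.dev group are hypothetical, and the exact JSON field order depends on the serializer, so the output comments are indicative only.

package main

import (
	"bytes"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// Example is a hypothetical API type; embedding runtime.TypeMeta supplies the
// apiVersion/kind fields that the serializers read and write.
type Example struct {
	runtime.TypeMeta `json:",inline"`
	Value            string `json:"value,omitempty"`
}

func (e *Example) GetObjectKind() schema.ObjectKind { return &e.TypeMeta }

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "example.dev", Version: "v1"}
	scheme.AddKnownTypes(gv, &Example{})
	codecs := serializer.NewCodecFactory(scheme)

	// LegacyCodec encodes to the requested version and always emits JSON.
	var buf bytes.Buffer
	if err := codecs.LegacyCodec(gv).Encode(&Example{Value: "hello"}, &buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // JSON with apiVersion "example.dev/v1", kind "Example", value "hello"

	// UniversalDeserializer decodes without conversion or defaulting.
	obj, gvk, err := codecs.UniversalDeserializer().Decode(buf.Bytes(), nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T %s\n", obj, gvk) // *main.Example example.dev/v1, Kind=Example
}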
+*/ + +package json + +import ( + "encoding/json" + "io" + + "github.com/ghodss/yaml" + "github.com/ugorji/go/codec" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/util/framer" + utilyaml "k8s.io/apimachinery/pkg/util/yaml" +) + +// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer +// is not nil, the object has the group, version, and kind fields set. +func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { + return &Serializer{ + meta: meta, + creater: creater, + typer: typer, + yaml: false, + pretty: pretty, + } +} + +// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer +// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that +// matches JSON, and will error if constructs are used that do not serialize to JSON. +func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { + return &Serializer{ + meta: meta, + creater: creater, + typer: typer, + yaml: true, + } +} + +type Serializer struct { + meta MetaFactory + creater runtime.ObjectCreater + typer runtime.ObjectTyper + yaml bool + pretty bool +} + +// Serializer implements Serializer +var _ runtime.Serializer = &Serializer{} +var _ recognizer.RecognizingDecoder = &Serializer{} + +// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then +// load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, the raw data will be +// extracted and no decoding will be performed. If into is not registered with the typer, then the object will be straight decoded using +// normal JSON/YAML unmarshalling. If into is provided and the original data is not fully qualified with kind/version/group, the type of +// the into will be used to alter the returned gvk. On success or most errors, the method will return the calculated schema kind. 
+func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + if versioned, ok := into.(*runtime.VersionedObjects); ok { + into = versioned.Last() + obj, actual, err := s.Decode(originalData, gvk, into) + if err != nil { + return nil, actual, err + } + versioned.Objects = []runtime.Object{obj} + return versioned, actual, nil + } + + data := originalData + if s.yaml { + altered, err := yaml.YAMLToJSON(data) + if err != nil { + return nil, nil, err + } + data = altered + } + + actual, err := s.meta.Interpret(data) + if err != nil { + return nil, nil, err + } + + if gvk != nil { + // apply kind and version defaulting from provided default + if len(actual.Kind) == 0 { + actual.Kind = gvk.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = gvk.Group + actual.Version = gvk.Version + } + if len(actual.Version) == 0 && actual.Group == gvk.Group { + actual.Version = gvk.Version + } + } + + if unk, ok := into.(*runtime.Unknown); ok && unk != nil { + unk.Raw = originalData + unk.ContentType = runtime.ContentTypeJSON + unk.GetObjectKind().SetGroupVersionKind(*actual) + return unk, actual, nil + } + + if into != nil { + types, _, err := s.typer.ObjectKinds(into) + switch { + case runtime.IsNotRegisteredError(err): + if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(into); err != nil { + return nil, actual, err + } + return into, actual, nil + case err != nil: + return nil, actual, err + default: + typed := types[0] + if len(actual.Kind) == 0 { + actual.Kind = typed.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = typed.Group + actual.Version = typed.Version + } + if len(actual.Version) == 0 && actual.Group == typed.Group { + actual.Version = typed.Version + } + } + } + + if len(actual.Kind) == 0 { + return nil, actual, runtime.NewMissingKindErr(string(originalData)) + } + if len(actual.Version) == 0 { + return nil, actual, runtime.NewMissingVersionErr(string(originalData)) + } + + // use the target if necessary + obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into) + if err != nil { + return nil, actual, err + } + + if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(obj); err != nil { + return nil, actual, err + } + return obj, actual, nil +} + +// Encode serializes the provided object to the given writer. +func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + if s.yaml { + json, err := json.Marshal(obj) + if err != nil { + return err + } + data, err := yaml.JSONToYAML(json) + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + + if s.pretty { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + encoder := json.NewEncoder(w) + return encoder.Encode(obj) +} + +// RecognizesData implements the RecognizingDecoder interface. +func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { + if s.yaml { + // we could potentially look for '---' + return false, true, nil + } + _, _, ok = utilyaml.GuessJSONStream(peek, 2048) + return ok, false, nil +} + +// Framer is the default JSON framing behavior, with newlines delimiting individual objects. 
+var Framer = jsonFramer{} + +type jsonFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer { + // we can write JSON objects directly to the writer, because they are self-framing + return w +} + +// NewFrameReader implements stream framing for this serializer +func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + // we need to extract the JSON chunks of data to pass to Decode() + return framer.NewJSONFramedReader(r) +} + +// Framer is the default JSON framing behavior, with newlines delimiting individual objects. +var YAMLFramer = yamlFramer{} + +type yamlFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer { + return yamlFrameWriter{w} +} + +// NewFrameReader implements stream framing for this serializer +func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + // extract the YAML document chunks directly + return utilyaml.NewDocumentDecoder(r) +} + +type yamlFrameWriter struct { + w io.Writer +} + +// Write separates each document with the YAML document separator (`---` followed by line +// break). Writers must write well formed YAML documents (include a final line break). +func (w yamlFrameWriter) Write(data []byte) (n int, err error) { + if _, err := w.w.Write([]byte("---\n")); err != nil { + return 0, err + } + return w.w.Write(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go new file mode 100644 index 000000000..df3f5f989 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go @@ -0,0 +1,63 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// MetaFactory is used to store and retrieve the version and kind +// information for JSON objects in a serializer. +type MetaFactory interface { + // Interpret should return the version and kind of the wire-format of + // the object. + Interpret(data []byte) (*schema.GroupVersionKind, error) +} + +// DefaultMetaFactory is a default factory for versioning objects in JSON. The object +// in memory and in the default JSON serialization will use the "kind" and "apiVersion" +// fields. +var DefaultMetaFactory = SimpleMetaFactory{} + +// SimpleMetaFactory provides default methods for retrieving the type and version of objects +// that are identified with an "apiVersion" and "kind" fields in their JSON +// serialization. It may be parameterized with the names of the fields in memory, or an +// optional list of base structs to search for those fields in memory. +type SimpleMetaFactory struct { +} + +// Interpret will return the APIVersion and Kind of the JSON wire-format +// encoding of an object, or an error. 
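The YAML framer above only prefixes each frame with a document separator; a tiny sketch of that observable behavior (the documents themselves are invented):

package main

import (
	"bytes"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	var buf bytes.Buffer
	w := json.YAMLFramer.NewFrameWriter(&buf)

	// Each frame is written as its own YAML document, preceded by "---".
	w.Write([]byte("a: 1\n"))
	w.Write([]byte("b: 2\n"))

	fmt.Print(buf.String())
	// ---
	// a: 1
	// ---
	// b: 2
}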
+func (SimpleMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) { + findKind := struct { + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // +optional + Kind string `json:"kind,omitempty"` + }{} + if err := json.Unmarshal(data, &findKind); err != nil { + return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err) + } + gv, err := schema.ParseGroupVersion(findKind.APIVersion) + if err != nil { + return nil, err + } + return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go new file mode 100644 index 000000000..a42b4a41a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// TODO: We should split negotiated serializers that we can change versions on from those we can change +// serialization formats on +type negotiatedSerializerWrapper struct { + info runtime.SerializerInfo +} + +func NegotiatedSerializerWrapper(info runtime.SerializerInfo) runtime.NegotiatedSerializer { + return &negotiatedSerializerWrapper{info} +} + +func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []runtime.SerializerInfo { + return []runtime.SerializerInfo{n.info} +} + +func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ runtime.GroupVersioner) runtime.Encoder { + return e +} + +func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _gv runtime.GroupVersioner) runtime.Decoder { + return d +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go new file mode 100644 index 000000000..72d0ac79b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package protobuf provides a Kubernetes serializer for the protobuf format. 
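The DefaultMetaFactory defined above can also be exercised directly; only the literal JSON document in this sketch is invented:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	// Interpret only peeks at apiVersion and kind; other fields are ignored.
	gvk, err := json.DefaultMetaFactory.Interpret([]byte(`{"apiVersion":"apps/v1","kind":"Deployment","spec":{}}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk) // apps/v1, Kind=Deployment
}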
+package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go new file mode 100644 index 000000000..8d4ea7118 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -0,0 +1,448 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "bytes" + "fmt" + "io" + "reflect" + + "github.com/gogo/protobuf/proto" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/util/framer" +) + +var ( + // protoEncodingPrefix serves as a magic number for an encoded protobuf message on this serializer. All + // proto messages serialized by this schema will be preceded by the bytes 0x6b 0x38 0x73, with the fourth + // byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that + // the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2). + // + // See k8s.io/apimachinery/pkg/runtime/generated.proto for details of the runtime.Unknown message. + // + // This encoding scheme is experimental, and is subject to change at any time. + protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00} +) + +type errNotMarshalable struct { + t reflect.Type +} + +func (e errNotMarshalable) Error() string { + return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t) +} + +func IsNotMarshalable(err error) bool { + _, ok := err.(errNotMarshalable) + return err != nil && ok +} + +// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer +// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written +// as-is (any type info passed with the object will be used). +// +// This encoding scheme is experimental, and is subject to change at any time. +func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer { + return &Serializer{ + prefix: protoEncodingPrefix, + creater: creater, + typer: typer, + contentType: defaultContentType, + } +} + +type Serializer struct { + prefix []byte + creater runtime.ObjectCreater + typer runtime.ObjectTyper + contentType string +} + +var _ runtime.Serializer = &Serializer{} +var _ recognizer.RecognizingDecoder = &Serializer{} + +// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default +// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, +// the raw data will be extracted and no decoding will be performed. 
If into is not registered with the typer, then the object will +// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is +// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most +// errors, the method will return the calculated schema kind. +func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + if versioned, ok := into.(*runtime.VersionedObjects); ok { + into = versioned.Last() + obj, actual, err := s.Decode(originalData, gvk, into) + if err != nil { + return nil, actual, err + } + // the last item in versioned becomes into, so if versioned was not originally empty we reset the object + // array so the first position is the decoded object and the second position is the outermost object. + // if there were no objects in the versioned list passed to us, only add ourselves. + if into != nil && into != obj { + versioned.Objects = []runtime.Object{obj, into} + } else { + versioned.Objects = []runtime.Object{obj} + } + return versioned, actual, err + } + + prefixLen := len(s.prefix) + switch { + case len(originalData) == 0: + // TODO: treat like decoding {} from JSON with defaulting + return nil, nil, fmt.Errorf("empty data") + case len(originalData) < prefixLen || !bytes.Equal(s.prefix, originalData[:prefixLen]): + return nil, nil, fmt.Errorf("provided data does not appear to be a protobuf message, expected prefix %v", s.prefix) + case len(originalData) == prefixLen: + // TODO: treat like decoding {} from JSON with defaulting + return nil, nil, fmt.Errorf("empty body") + } + + data := originalData[prefixLen:] + unk := runtime.Unknown{} + if err := unk.Unmarshal(data); err != nil { + return nil, nil, err + } + + actual := unk.GroupVersionKind() + copyKindDefaults(&actual, gvk) + + if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { + *intoUnknown = unk + if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok { + intoUnknown.ContentType = s.contentType + } + return intoUnknown, &actual, nil + } + + if into != nil { + types, _, err := s.typer.ObjectKinds(into) + switch { + case runtime.IsNotRegisteredError(err): + pb, ok := into.(proto.Message) + if !ok { + return nil, &actual, errNotMarshalable{reflect.TypeOf(into)} + } + if err := proto.Unmarshal(unk.Raw, pb); err != nil { + return nil, &actual, err + } + return into, &actual, nil + case err != nil: + return nil, &actual, err + default: + copyKindDefaults(&actual, &types[0]) + // if the result of defaulting did not set a version or group, ensure that at least group is set + // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group + // of into is set if there is no better information from the caller or object. + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = types[0].Group + } + } + } + + if len(actual.Kind) == 0 { + return nil, &actual, runtime.NewMissingKindErr(fmt.Sprintf("%#v", unk.TypeMeta)) + } + if len(actual.Version) == 0 { + return nil, &actual, runtime.NewMissingVersionErr(fmt.Sprintf("%#v", unk.TypeMeta)) + } + + return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw) +} + +// Encode serializes the provided object to the given writer. 
+func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + prefixSize := uint64(len(s.prefix)) + + var unk runtime.Unknown + switch t := obj.(type) { + case *runtime.Unknown: + estimatedSize := prefixSize + uint64(t.Size()) + data := make([]byte, estimatedSize) + i, err := t.MarshalTo(data[prefixSize:]) + if err != nil { + return err + } + copy(data, s.prefix) + _, err = w.Write(data[:prefixSize+uint64(i)]) + return err + default: + kind := obj.GetObjectKind().GroupVersionKind() + unk = runtime.Unknown{ + TypeMeta: runtime.TypeMeta{ + Kind: kind.Kind, + APIVersion: kind.GroupVersion().String(), + }, + } + } + + switch t := obj.(type) { + case bufferedMarshaller: + // this path performs a single allocation during write but requires the caller to implement + // the more efficient Size and MarshalTo methods + encodedSize := uint64(t.Size()) + estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize) + data := make([]byte, estimatedSize) + + i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize) + if err != nil { + return err + } + + copy(data, s.prefix) + + _, err = w.Write(data[:prefixSize+uint64(i)]) + return err + + case proto.Marshaler: + // this path performs extra allocations + data, err := t.Marshal() + if err != nil { + return err + } + unk.Raw = data + + estimatedSize := prefixSize + uint64(unk.Size()) + data = make([]byte, estimatedSize) + + i, err := unk.MarshalTo(data[prefixSize:]) + if err != nil { + return err + } + + copy(data, s.prefix) + + _, err = w.Write(data[:prefixSize+uint64(i)]) + return err + + default: + // TODO: marshal with a different content type and serializer (JSON for third party objects) + return errNotMarshalable{reflect.TypeOf(obj)} + } +} + +// RecognizesData implements the RecognizingDecoder interface. +func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) { + prefix := make([]byte, 4) + n, err := peek.Read(prefix) + if err != nil { + if err == io.EOF { + return false, false, nil + } + return false, false, err + } + if n != 4 { + return false, false, nil + } + return bytes.Equal(s.prefix, prefix), false, nil +} + +// copyKindDefaults defaults dst to the value in src if dst does not have a value set. +func copyKindDefaults(dst, src *schema.GroupVersionKind) { + if src == nil { + return + } + // apply kind and version defaulting from provided default + if len(dst.Kind) == 0 { + dst.Kind = src.Kind + } + if len(dst.Version) == 0 && len(src.Version) > 0 { + dst.Group = src.Group + dst.Version = src.Version + } +} + +// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple +// byte buffers by pre-calculating the size of the final buffer needed. +type bufferedMarshaller interface { + proto.Sizer + runtime.ProtobufMarshaller +} + +// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown +// object with a nil RawJSON struct and the expected size of the provided buffer. The +// returned size will not be correct if RawJSOn is set on unk. +func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 { + size := uint64(unk.Size()) + // protobuf uses 1 byte for the tag, a varint for the length of the array (at most 8 bytes - uint64 - here), + // and the size of the array. + size += 1 + 8 + byteSize + return size +} + +// NewRawSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If typer +// is not nil, the object has the group, version, and kind fields set. 
This serializer does not provide type information for the +// encoded object, and thus is not self describing (callers must know what type is being described in order to decode). +// +// This encoding scheme is experimental, and is subject to change at any time. +func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer { + return &RawSerializer{ + creater: creater, + typer: typer, + contentType: defaultContentType, + } +} + +// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying +// type). +type RawSerializer struct { + creater runtime.ObjectCreater + typer runtime.ObjectTyper + contentType string +} + +var _ runtime.Serializer = &RawSerializer{} + +// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default +// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, +// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will +// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is +// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most +// errors, the method will return the calculated schema kind. +func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + if into == nil { + return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s) + } + + if versioned, ok := into.(*runtime.VersionedObjects); ok { + into = versioned.Last() + obj, actual, err := s.Decode(originalData, gvk, into) + if err != nil { + return nil, actual, err + } + if into != nil && into != obj { + versioned.Objects = []runtime.Object{obj, into} + } else { + versioned.Objects = []runtime.Object{obj} + } + return versioned, actual, err + } + + if len(originalData) == 0 { + // TODO: treat like decoding {} from JSON with defaulting + return nil, nil, fmt.Errorf("empty data") + } + data := originalData + + actual := &schema.GroupVersionKind{} + copyKindDefaults(actual, gvk) + + if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { + intoUnknown.Raw = data + intoUnknown.ContentEncoding = "" + intoUnknown.ContentType = s.contentType + intoUnknown.SetGroupVersionKind(*actual) + return intoUnknown, actual, nil + } + + types, _, err := s.typer.ObjectKinds(into) + switch { + case runtime.IsNotRegisteredError(err): + pb, ok := into.(proto.Message) + if !ok { + return nil, actual, errNotMarshalable{reflect.TypeOf(into)} + } + if err := proto.Unmarshal(data, pb); err != nil { + return nil, actual, err + } + return into, actual, nil + case err != nil: + return nil, actual, err + default: + copyKindDefaults(actual, &types[0]) + // if the result of defaulting did not set a version or group, ensure that at least group is set + // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group + // of into is set if there is no better information from the caller or object. 
+ if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = types[0].Group + } + } + + if len(actual.Kind) == 0 { + return nil, actual, runtime.NewMissingKindErr("") + } + if len(actual.Version) == 0 { + return nil, actual, runtime.NewMissingVersionErr("") + } + + return unmarshalToObject(s.typer, s.creater, actual, into, data) +} + +// unmarshalToObject is the common code between decode in the raw and normal serializer. +func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *schema.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *schema.GroupVersionKind, error) { + // use the target if necessary + obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into) + if err != nil { + return nil, actual, err + } + + pb, ok := obj.(proto.Message) + if !ok { + return nil, actual, errNotMarshalable{reflect.TypeOf(obj)} + } + if err := proto.Unmarshal(data, pb); err != nil { + return nil, actual, err + } + return obj, actual, nil +} + +// Encode serializes the provided object to the given writer. Overrides is ignored. +func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { + switch t := obj.(type) { + case bufferedMarshaller: + // this path performs a single allocation during write but requires the caller to implement + // the more efficient Size and MarshalTo methods + encodedSize := uint64(t.Size()) + data := make([]byte, encodedSize) + + n, err := t.MarshalTo(data) + if err != nil { + return err + } + _, err = w.Write(data[:n]) + return err + + case proto.Marshaler: + // this path performs extra allocations + data, err := t.Marshal() + if err != nil { + return err + } + _, err = w.Write(data) + return err + + default: + return errNotMarshalable{reflect.TypeOf(obj)} + } +} + +var LengthDelimitedFramer = lengthDelimitedFramer{} + +type lengthDelimitedFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (lengthDelimitedFramer) NewFrameWriter(w io.Writer) io.Writer { + return framer.NewLengthDelimitedFrameWriter(w) +} + +// NewFrameReader implements stream framing for this serializer +func (lengthDelimitedFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + return framer.NewLengthDelimitedFrameReader(r) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go new file mode 100644 index 000000000..545cf78df --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go @@ -0,0 +1,48 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" +) + +const ( + // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from + // depending on it unintentionally. 
+ // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the + // CodecFactory on initialization. + contentTypeProtobuf = "application/vnd.kubernetes.protobuf" +) + +func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) { + serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf) + raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf) + return serializerType{ + AcceptContentTypes: []string{contentTypeProtobuf}, + ContentType: contentTypeProtobuf, + FileExtensions: []string{"pb"}, + Serializer: serializer, + + Framer: protobuf.LengthDelimitedFramer, + StreamSerializer: raw, + }, true +} + +func init() { + serializerExtensions = append(serializerExtensions, protobufSerializer) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go new file mode 100644 index 000000000..38497ab53 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go @@ -0,0 +1,127 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package recognizer + +import ( + "bufio" + "bytes" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type RecognizingDecoder interface { + runtime.Decoder + // RecognizesData should return true if the input provided in the provided reader + // belongs to this decoder, or an error if the data could not be read or is ambiguous. + // Unknown is true if the data could not be determined to match the decoder type. + // Decoders should assume that they can read as much of peek as they need (as the caller + // provides) and may return unknown if the data provided is not sufficient to make a + // a determination. When peek returns EOF that may mean the end of the input or the + // end of buffered input - recognizers should return the best guess at that time. + RecognizesData(peek io.Reader) (ok, unknown bool, err error) +} + +// NewDecoder creates a decoder that will attempt multiple decoders in an order defined +// by: +// +// 1. The decoder implements RecognizingDecoder and identifies the data +// 2. All other decoders, and any decoder that returned true for unknown. +// +// The order passed to the constructor is preserved within those priorities. 
+func NewDecoder(decoders ...runtime.Decoder) runtime.Decoder { + return &decoder{ + decoders: decoders, + } +} + +type decoder struct { + decoders []runtime.Decoder +} + +var _ RecognizingDecoder = &decoder{} + +func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) { + var ( + lastErr error + anyUnknown bool + ) + data, _ := bufio.NewReaderSize(peek, 1024).Peek(1024) + for _, r := range d.decoders { + switch t := r.(type) { + case RecognizingDecoder: + ok, unknown, err := t.RecognizesData(bytes.NewBuffer(data)) + if err != nil { + lastErr = err + continue + } + anyUnknown = anyUnknown || unknown + if !ok { + continue + } + return true, false, nil + } + } + return false, anyUnknown, lastErr +} + +func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + var ( + lastErr error + skipped []runtime.Decoder + ) + + // try recognizers, record any decoders we need to give a chance later + for _, r := range d.decoders { + switch t := r.(type) { + case RecognizingDecoder: + buf := bytes.NewBuffer(data) + ok, unknown, err := t.RecognizesData(buf) + if err != nil { + lastErr = err + continue + } + if unknown { + skipped = append(skipped, t) + continue + } + if !ok { + continue + } + return r.Decode(data, gvk, into) + default: + skipped = append(skipped, t) + } + } + + // try recognizers that returned unknown or didn't recognize their data + for _, r := range skipped { + out, actual, err := r.Decode(data, gvk, into) + if err != nil { + lastErr = err + continue + } + return out, actual, nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no serialization format matched the provided data") + } + return nil, nil, lastErr +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go new file mode 100644 index 000000000..91fd4ed4f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -0,0 +1,137 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package streaming implements encoder and decoder for streams +// of runtime.Objects over io.Writer/Readers. +package streaming + +import ( + "bytes" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Encoder is a runtime.Encoder on a stream. +type Encoder interface { + // Encode will write the provided object to the stream or return an error. It obeys the same + // contract as runtime.VersionedEncoder. + Encode(obj runtime.Object) error +} + +// Decoder is a runtime.Decoder from a stream. +type Decoder interface { + // Decode will return io.EOF when no more objects are available. + Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) + // Close closes the underlying stream. + Close() error +} + +// Serializer is a factory for creating encoders and decoders that work over streams. 
+type Serializer interface { + NewEncoder(w io.Writer) Encoder + NewDecoder(r io.ReadCloser) Decoder +} + +type decoder struct { + reader io.ReadCloser + decoder runtime.Decoder + buf []byte + maxBytes int + resetRead bool +} + +// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d. +// The reader is expected to return ErrShortRead if the provided buffer is not large enough to read +// an entire object. +func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder { + return &decoder{ + reader: r, + decoder: d, + buf: make([]byte, 1024), + maxBytes: 1024 * 1024, + } +} + +var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size") + +// Decode reads the next object from the stream and decodes it. +func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + base := 0 + for { + n, err := d.reader.Read(d.buf[base:]) + if err == io.ErrShortBuffer { + if n == 0 { + return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf)) + } + if d.resetRead { + continue + } + // double the buffer size up to maxBytes + if len(d.buf) < d.maxBytes { + base += n + d.buf = append(d.buf, make([]byte, len(d.buf))...) + continue + } + // must read the rest of the frame (until we stop getting ErrShortBuffer) + d.resetRead = true + base = 0 + return nil, nil, ErrObjectTooLarge + } + if err != nil { + return nil, nil, err + } + if d.resetRead { + // now that we have drained the large read, continue + d.resetRead = false + continue + } + base += n + break + } + return d.decoder.Decode(d.buf[:base], defaults, into) +} + +func (d *decoder) Close() error { + return d.reader.Close() +} + +type encoder struct { + writer io.Writer + encoder runtime.Encoder + buf *bytes.Buffer +} + +// NewEncoder returns a new streaming encoder. +func NewEncoder(w io.Writer, e runtime.Encoder) Encoder { + return &encoder{ + writer: w, + encoder: e, + buf: &bytes.Buffer{}, + } +} + +// Encode writes the provided object to the nested writer. +func (e *encoder) Encode(obj runtime.Object) error { + if err := e.encoder.Encode(obj, e.buf); err != nil { + return err + } + _, err := e.writer.Write(e.buf.Bytes()) + e.buf.Reset() + return err +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go new file mode 100644 index 000000000..829d4fa89 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -0,0 +1,273 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioning + +import ( + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// NewCodecForScheme is a convenience method for callers that are using a scheme. +func NewCodecForScheme( + // TODO: I should be a scheme interface? 
+ scheme *runtime.Scheme, + encoder runtime.Encoder, + decoder runtime.Decoder, + encodeVersion runtime.GroupVersioner, + decodeVersion runtime.GroupVersioner, +) runtime.Codec { + return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, nil, encodeVersion, decodeVersion) +} + +// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme. +func NewDefaultingCodecForScheme( + // TODO: I should be a scheme interface? + scheme *runtime.Scheme, + encoder runtime.Encoder, + decoder runtime.Decoder, + encodeVersion runtime.GroupVersioner, + decodeVersion runtime.GroupVersioner, +) runtime.Codec { + return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, scheme, encodeVersion, decodeVersion) +} + +// NewCodec takes objects in their internal versions and converts them to external versions before +// serializing them. It assumes the serializer provided to it only deals with external versions. +// This class is also a serializer, but is generally used with a specific version. +func NewCodec( + encoder runtime.Encoder, + decoder runtime.Decoder, + convertor runtime.ObjectConvertor, + creater runtime.ObjectCreater, + copier runtime.ObjectCopier, + typer runtime.ObjectTyper, + defaulter runtime.ObjectDefaulter, + encodeVersion runtime.GroupVersioner, + decodeVersion runtime.GroupVersioner, +) runtime.Codec { + internal := &codec{ + encoder: encoder, + decoder: decoder, + convertor: convertor, + creater: creater, + copier: copier, + typer: typer, + defaulter: defaulter, + + encodeVersion: encodeVersion, + decodeVersion: decodeVersion, + } + return internal +} + +type codec struct { + encoder runtime.Encoder + decoder runtime.Decoder + convertor runtime.ObjectConvertor + creater runtime.ObjectCreater + copier runtime.ObjectCopier + typer runtime.ObjectTyper + defaulter runtime.ObjectDefaulter + + encodeVersion runtime.GroupVersioner + decodeVersion runtime.GroupVersioner +} + +// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is +// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an +// into that matches the serialized version. +func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + versioned, isVersioned := into.(*runtime.VersionedObjects) + if isVersioned { + into = versioned.Last() + } + + obj, gvk, err := c.decoder.Decode(data, defaultGVK, into) + if err != nil { + return nil, gvk, err + } + + if d, ok := obj.(runtime.NestedObjectDecoder); ok { + if err := d.DecodeNestedObjects(DirectDecoder{c.decoder}); err != nil { + return nil, gvk, err + } + } + + // if we specify a target, use generic conversion. 
+ if into != nil { + if into == obj { + if isVersioned { + return versioned, gvk, nil + } + return into, gvk, nil + } + + // perform defaulting if requested + if c.defaulter != nil { + // create a copy to ensure defaulting is not applied to the original versioned objects + if isVersioned { + copied, err := c.copier.Copy(obj) + if err != nil { + utilruntime.HandleError(err) + copied = obj + } + versioned.Objects = []runtime.Object{copied} + } + c.defaulter.Default(obj) + } else { + if isVersioned { + versioned.Objects = []runtime.Object{obj} + } + } + + if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil { + return nil, gvk, err + } + + if isVersioned { + versioned.Objects = append(versioned.Objects, into) + return versioned, gvk, nil + } + return into, gvk, nil + } + + // Convert if needed. + if isVersioned { + // create a copy, because ConvertToVersion does not guarantee non-mutation of objects + copied, err := c.copier.Copy(obj) + if err != nil { + utilruntime.HandleError(err) + copied = obj + } + versioned.Objects = []runtime.Object{copied} + } + + // perform defaulting if requested + if c.defaulter != nil { + c.defaulter.Default(obj) + } + + out, err := c.convertor.ConvertToVersion(obj, c.decodeVersion) + if err != nil { + return nil, gvk, err + } + if isVersioned { + if versioned.Last() != out { + versioned.Objects = append(versioned.Objects, out) + } + return versioned, gvk, nil + } + return out, gvk, nil +} + +// Encode ensures the provided object is output in the appropriate group and version, invoking +// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. +func (c *codec) Encode(obj runtime.Object, w io.Writer) error { + switch obj.(type) { + case *runtime.Unknown, runtime.Unstructured: + return c.encoder.Encode(obj, w) + } + + gvks, isUnversioned, err := c.typer.ObjectKinds(obj) + if err != nil { + return err + } + + if c.encodeVersion == nil || isUnversioned { + if e, ok := obj.(runtime.NestedObjectEncoder); ok { + if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + return err + } + } + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() + objectKind.SetGroupVersionKind(gvks[0]) + err = c.encoder.Encode(obj, w) + objectKind.SetGroupVersionKind(old) + return err + } + + // Perform a conversion if necessary + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() + out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion) + if err != nil { + return err + } + + if e, ok := out.(runtime.NestedObjectEncoder); ok { + if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + return err + } + } + + // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object + err = c.encoder.Encode(out, w) + // restore the old GVK, in case conversion returned the same object + objectKind.SetGroupVersionKind(old) + return err +} + +// DirectEncoder serializes an object and ensures the GVK is set. +type DirectEncoder struct { + Version runtime.GroupVersioner + runtime.Encoder + runtime.ObjectTyper +} + +// Encode does not do conversion. It sets the gvk during serialization. 
+func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error { + gvks, _, err := e.ObjectTyper.ObjectKinds(obj) + if err != nil { + if runtime.IsNotRegisteredError(err) { + return e.Encoder.Encode(obj, stream) + } + return err + } + kind := obj.GetObjectKind() + oldGVK := kind.GroupVersionKind() + gvk := gvks[0] + if e.Version != nil { + preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) + if ok { + gvk = preferredGVK + } + } + kind.SetGroupVersionKind(gvk) + err = e.Encoder.Encode(obj, stream) + kind.SetGroupVersionKind(oldGVK) + return err +} + +// DirectDecoder clears the group version kind of a deserialized object. +type DirectDecoder struct { + runtime.Decoder +} + +// Decode does not do conversion. It removes the gvk during deserialization. +func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaults, into) + if obj != nil { + kind := obj.GetObjectKind() + // clearing the gvk is just a convention of a codec + kind.SetGroupVersionKind(schema.GroupVersionKind{}) + } + return obj, gvk, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go new file mode 100644 index 000000000..29722d52e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go @@ -0,0 +1,262 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "fmt" + "go/ast" + "go/doc" + "go/parser" + "go/token" + "io" + "reflect" + "strings" +) + +// Pair of strings. We keed the name of fields and the doc +type Pair struct { + Name, Doc string +} + +// KubeTypes is an array to represent all available types in a parsed file. [0] is for the type itself +type KubeTypes []Pair + +func astFrom(filePath string) *doc.Package { + fset := token.NewFileSet() + m := make(map[string]*ast.File) + + f, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments) + if err != nil { + fmt.Println(err) + return nil + } + + m[filePath] = f + apkg, _ := ast.NewPackage(fset, m, nil, nil) + + return doc.New(apkg, "", 0) +} + +func fmtRawDoc(rawDoc string) string { + var buffer bytes.Buffer + delPrevChar := func() { + if buffer.Len() > 0 { + buffer.Truncate(buffer.Len() - 1) // Delete the last " " or "\n" + } + } + + // Ignore all lines after --- + rawDoc = strings.Split(rawDoc, "---")[0] + + for _, line := range strings.Split(rawDoc, "\n") { + line = strings.TrimRight(line, " ") + leading := strings.TrimLeft(line, " ") + switch { + case len(line) == 0: // Keep paragraphs + delPrevChar() + buffer.WriteString("\n\n") + case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs + case strings.HasPrefix(leading, "+"): // Ignore instructions to go2idl + default: + if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { + delPrevChar() + line = "\n" + line + "\n" // Replace it with newline. 
This is useful when we have a line with: "Example:\n\tJSON-someting..." + } else { + line += " " + } + buffer.WriteString(line) + } + } + + postDoc := strings.TrimRight(buffer.String(), "\n") + postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to " + postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape " + postDoc = strings.Replace(postDoc, "\n", "\\n", -1) + postDoc = strings.Replace(postDoc, "\t", "\\t", -1) + + return postDoc +} + +// fieldName returns the name of the field as it should appear in JSON format +// "-" indicates that this field is not part of the JSON representation +func fieldName(field *ast.Field) string { + jsonTag := "" + if field.Tag != nil { + jsonTag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1]).Get("json") // Delete first and last quotation + if strings.Contains(jsonTag, "inline") { + return "-" + } + } + + jsonTag = strings.Split(jsonTag, ",")[0] // This can return "-" + if jsonTag == "" { + if field.Names != nil { + return field.Names[0].Name + } + return field.Type.(*ast.Ident).Name + } + return jsonTag +} + +// A buffer of lines that will be written. +type bufferedLine struct { + line string + indentation int +} + +type buffer struct { + lines []bufferedLine +} + +func newBuffer() *buffer { + return &buffer{ + lines: make([]bufferedLine, 0), + } +} + +func (b *buffer) addLine(line string, indent int) { + b.lines = append(b.lines, bufferedLine{line, indent}) +} + +func (b *buffer) flushLines(w io.Writer) error { + for _, line := range b.lines { + indentation := strings.Repeat("\t", line.indentation) + fullLine := fmt.Sprintf("%s%s", indentation, line.line) + if _, err := io.WriteString(w, fullLine); err != nil { + return err + } + } + return nil +} + +func writeFuncHeader(b *buffer, structName string, indent int) { + s := fmt.Sprintf("var map_%s = map[string]string {\n", structName) + b.addLine(s, indent) +} + +func writeFuncFooter(b *buffer, structName string, indent int) { + b.addLine("}\n", indent) // Closes the map definition + + s := fmt.Sprintf("func (%s) SwaggerDoc() map[string]string {\n", structName) + b.addLine(s, indent) + s = fmt.Sprintf("return map_%s\n", structName) + b.addLine(s, indent+1) + b.addLine("}\n", indent) // Closes the function definition +} + +func writeMapBody(b *buffer, kubeType []Pair, indent int) { + format := "\"%s\": \"%s\",\n" + for _, pair := range kubeType { + s := fmt.Sprintf(format, pair.Name, pair.Doc) + b.addLine(s, indent+2) + } +} + +// ParseDocumentationFrom gets all types' documentation and returns them as an +// array. Each type is again represented as an array (we have to use arrays as we +// need to be sure for the order of the fields). This function returns fields and +// struct definitions that have no documentation as {name, ""}. +func ParseDocumentationFrom(src string) []KubeTypes { + var docForTypes []KubeTypes + + pkg := astFrom(src) + + for _, kubType := range pkg.Types { + if structType, ok := kubType.Decl.Specs[0].(*ast.TypeSpec).Type.(*ast.StructType); ok { + var ks KubeTypes + ks = append(ks, Pair{kubType.Name, fmtRawDoc(kubType.Doc)}) + + for _, field := range structType.Fields.List { + if n := fieldName(field); n != "-" { + fieldDoc := fmtRawDoc(field.Doc.Text()) + ks = append(ks, Pair{n, fieldDoc}) + } + } + docForTypes = append(docForTypes, ks) + } + } + + return docForTypes +} + +// WriteSwaggerDocFunc writes a declaration of a function as a string. 
This function is used in +// Swagger as a documentation source for structs and theirs fields +func WriteSwaggerDocFunc(kubeTypes []KubeTypes, w io.Writer) error { + for _, kubeType := range kubeTypes { + structName := kubeType[0].Name + kubeType[0].Name = "" + + // Ignore empty documentation + docfulTypes := make(KubeTypes, 0, len(kubeType)) + for _, pair := range kubeType { + if pair.Doc != "" { + docfulTypes = append(docfulTypes, pair) + } + } + + if len(docfulTypes) == 0 { + continue // If no fields and the struct have documentation, skip the function definition + } + + indent := 0 + buffer := newBuffer() + + writeFuncHeader(buffer, structName, indent) + writeMapBody(buffer, docfulTypes, indent) + writeFuncFooter(buffer, structName, indent) + buffer.addLine("\n", 0) + + if err := buffer.flushLines(w); err != nil { + return err + } + } + + return nil +} + +// VerifySwaggerDocsExist writes in a io.Writer a list of structs and fields that +// are missing of documentation. +func VerifySwaggerDocsExist(kubeTypes []KubeTypes, w io.Writer) (int, error) { + missingDocs := 0 + buffer := newBuffer() + + for _, kubeType := range kubeTypes { + structName := kubeType[0].Name + if kubeType[0].Doc == "" { + format := "Missing documentation for the struct itself: %s\n" + s := fmt.Sprintf(format, structName) + buffer.addLine(s, 0) + missingDocs++ + } + kubeType = kubeType[1:] // Skip struct definition + + for _, pair := range kubeType { // Iterate only the fields + if pair.Doc == "" { + format := "In struct: %s, field documentation is missing: %s\n" + s := fmt.Sprintf(format, structName, pair.Name) + buffer.addLine(s, 0) + missingDocs++ + } + } + } + + if err := buffer.flushLines(w); err != nil { + return -1, err + } + return missingDocs, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go new file mode 100644 index 000000000..f972c5e69 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -0,0 +1,133 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +// Note that the types provided in this file are not versioned and are intended to be +// safe to use from within all versions of every API object. + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. 
+// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type TypeMeta struct { + // +optional + APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` + // +optional + Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"` +} + +const ( + ContentTypeJSON string = "application/json" +) + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. +// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type RawExtension struct { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + Raw []byte `protobuf:"bytes,1,opt,name=raw"` + // Object can hold a representation of this extension - useful for working with versioned + // structs. + Object Object `json:"-"` +} + +// Unknown allows api objects with unknown types to be passed-through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field based accessors and settors for +// metadata and field mutatation. +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type Unknown struct { + TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"` + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. + Raw []byte `protobuf:"bytes,2,opt,name=raw"` + // ContentEncoding is encoding used to encode 'Raw' data. + // Unspecified means no encoding. + ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` + // ContentType is serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. + ContentType string `protobuf:"bytes,4,opt,name=contentType"` +} + +// VersionedObjects is used by Decoders to give callers a way to access all versions +// of an object during the decoding process. 
+type VersionedObjects struct { + // Objects is the set of objects retrieved during decoding, in order of conversion. + // The 0 index is the object as serialized on the wire. If conversion has occurred, + // other objects may be present. The right most object is the same as would be returned + // by a normal Decode call. + Objects []Object +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go new file mode 100644 index 000000000..ead96ee05 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -0,0 +1,69 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" +) + +type ProtobufMarshaller interface { + MarshalTo(data []byte) (int, error) +} + +// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown +// that will contain an object that implements ProtobufMarshaller. +func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + + if b != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, size) + n2, err := b.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + if uint64(n2) != size { + // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2) + } + i += n2 + } + + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i += copy(data[i:], m.ContentEncoding) + + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i += copy(data[i:], m.ContentType) + return i, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go new file mode 100644 index 000000000..54ce6ad59 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -0,0 +1,83 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package runtime + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_runtime_RawExtension, InType: reflect.TypeOf(&RawExtension{})}, + {Fn: DeepCopy_runtime_TypeMeta, InType: reflect.TypeOf(&TypeMeta{})}, + {Fn: DeepCopy_runtime_Unknown, InType: reflect.TypeOf(&Unknown{})}, + } +} + +// DeepCopy_runtime_RawExtension is an autogenerated deepcopy function. +func DeepCopy_runtime_RawExtension(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RawExtension) + out := out.(*RawExtension) + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + // in.Object is kind 'Interface' + if in.Object != nil { + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*Object) + } + } + return nil + } +} + +// DeepCopy_runtime_TypeMeta is an autogenerated deepcopy function. +func DeepCopy_runtime_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TypeMeta) + out := out.(*TypeMeta) + *out = *in + return nil + } +} + +// DeepCopy_runtime_Unknown is an autogenerated deepcopy function. +func DeepCopy_runtime_Unknown(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Unknown) + out := out.(*Unknown) + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/selection/operator.go b/vendor/k8s.io/apimachinery/pkg/selection/operator.go new file mode 100644 index 000000000..298f798c4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/selection/operator.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selection + +// Operator represents a key/field's relationship to value(s). +// See labels.Requirement and fields.Requirement for more details. +type Operator string + +const ( + DoesNotExist Operator = "!" + Equals Operator = "=" + DoubleEquals Operator = "==" + In Operator = "in" + NotEquals Operator = "!=" + NotIn Operator = "notin" + Exists Operator = "exists" + GreaterThan Operator = "gt" + LessThan Operator = "lt" +) diff --git a/vendor/k8s.io/apimachinery/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go new file mode 100644 index 000000000..5667fa992 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package types implements various generic types used throughout kubernetes. +package types // import "k8s.io/apimachinery/pkg/types" diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go new file mode 100644 index 000000000..1e2130da0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -0,0 +1,60 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "fmt" + "strings" +) + +// NamespacedName comprises a resource name, with a mandatory namespace, +// rendered as "/". Being a type captures intent and +// helps make sure that UIDs, namespaced names and non-namespaced names +// do not get conflated in code. For most use cases, namespace and name +// will already have been format validated at the API entry point, so we +// don't do that here. Where that's not the case (e.g. in testing), +// consider using NamespacedNameOrDie() in testing.go in this package. + +type NamespacedName struct { + Namespace string + Name string +} + +const ( + Separator = '/' +) + +// String returns the general purpose string representation +func (n NamespacedName) String() string { + return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name) +} + +// NewNamespacedNameFromString parses the provided string and returns a NamespacedName. +// The expected format is as per String() above. +// If the input string is invalid, the returned NamespacedName has all empty string field values. +// This allows a single-value return from this function, while still allowing error checks in the caller. +// Note that an input string which does not include exactly one Separator is not a valid input (as it could never +// have neem returned by String() ) +func NewNamespacedNameFromString(s string) NamespacedName { + nn := NamespacedName{} + result := strings.Split(s, string(Separator)) + if len(result) == 2 { + nn.Namespace = result[0] + nn.Name = result[1] + } + return nn +} diff --git a/vendor/k8s.io/apimachinery/pkg/types/nodename.go b/vendor/k8s.io/apimachinery/pkg/types/nodename.go new file mode 100644 index 000000000..fee348d7e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/nodename.go @@ -0,0 +1,43 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// NodeName is a type that holds a api.Node's Name identifier. +// Being a type captures intent and helps make sure that the node name +// is not confused with similar concepts (the hostname, the cloud provider id, +// the cloud provider name etc) +// +// To clarify the various types: +// +// * Node.Name is the Name field of the Node in the API. This should be stored in a NodeName. +// Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level. +// +// * Hostname is the hostname of the local machine (from uname -n). +// However, some components allow the user to pass in a --hostname-override flag, +// which will override this in most places. In the absence of anything more meaningful, +// kubelet will use Hostname as the Node.Name when it creates the Node. +// +// * The cloudproviders have the own names: GCE has InstanceName, AWS has InstanceId. +// +// For GCE, InstanceName is the Name of an Instance object in the GCE API. On GCE, Instance.Name becomes the +// Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up +// to the cloudprovider how to do this mapping. +// +// For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the +// PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if +// we are using a custom DHCP domain it won't be. +type NodeName string diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go new file mode 100644 index 000000000..d522d1dbd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -0,0 +1,28 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// Similarly to above, these are constants to support HTTP PATCH utilized by +// both the client and server that didn't make sense for a whole package to be +// dedicated to. +type PatchType string + +const ( + JSONPatchType PatchType = "application/json-patch+json" + MergePatchType PatchType = "application/merge-patch+json" + StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" +) diff --git a/vendor/k8s.io/apimachinery/pkg/types/uid.go b/vendor/k8s.io/apimachinery/pkg/types/uid.go new file mode 100644 index 000000000..869339222 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/uid.go @@ -0,0 +1,22 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// UID is a type that holds unique ID values, including UUIDs. Because we +// don't ONLY use UUIDs, this is an alias to string. Being a type captures +// intent and helps make sure that UIDs and names do not get conflated. +type UID string diff --git a/vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go b/vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go new file mode 100644 index 000000000..dc770c11e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// int64 is used as a safe bet against wrap-around (uid's are general +// int32) and to support uid_t -1, and -2. + +type UnixUserID int64 +type UnixGroupID int64 diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go new file mode 100644 index 000000000..c303a212a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -0,0 +1,327 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import ( + "sync" + "time" +) + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + Now() time.Time + Since(time.Time) time.Duration + After(d time.Duration) <-chan time.Time + NewTimer(d time.Duration) Timer + Sleep(d time.Duration) + Tick(d time.Duration) <-chan time.Time +} + +var ( + _ = Clock(RealClock{}) + _ = Clock(&FakeClock{}) + _ = Clock(&IntervalClock{}) +) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// Same as time.After(d). 
+func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// FakeClock implements Clock, but returns an arbitrary time. +type FakeClock struct { + lock sync.RWMutex + time time.Time + + // waiters are waiting for the fake time to pass their specified time + waiters []fakeClockWaiter +} + +type fakeClockWaiter struct { + targetTime time.Time + stepInterval time.Duration + skipIfBlocked bool + destChan chan time.Time + fired bool +} + +func NewFakeClock(t time.Time) *FakeClock { + return &FakeClock{ + time: t, + } +} + +// Now returns f's time. +func (f *FakeClock) Now() time.Time { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time +} + +// Since returns time since the time in f. +func (f *FakeClock) Since(ts time.Time) time.Duration { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time.Sub(ts) +} + +// Fake version of time.After(d). +func (f *FakeClock) After(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + f.waiters = append(f.waiters, fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }) + return ch +} + +// Fake version of time.NewTimer(d). +func (f *FakeClock) NewTimer(d time.Duration) Timer { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + timer := &fakeTimer{ + fakeClock: f, + waiter: fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }, + } + f.waiters = append(f.waiters, timer.waiter) + return timer +} + +func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return ch +} + +// Move clock by Duration, notify anyone that's called After, Tick, or NewTimer +func (f *FakeClock) Step(d time.Duration) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(f.time.Add(d)) +} + +// Sets the time. +func (f *FakeClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(t) +} + +// Actually changes the time and checks any waiters. f must be write-locked. +func (f *FakeClock) setTimeLocked(t time.Time) { + f.time = t + newWaiters := make([]fakeClockWaiter, 0, len(f.waiters)) + for i := range f.waiters { + w := &f.waiters[i] + if !w.targetTime.After(t) { + + if w.skipIfBlocked { + select { + case w.destChan <- t: + w.fired = true + default: + } + } else { + w.destChan <- t + w.fired = true + } + + if w.stepInterval > 0 { + for !w.targetTime.After(t) { + w.targetTime = w.targetTime.Add(w.stepInterval) + } + newWaiters = append(newWaiters, *w) + } + + } else { + newWaiters = append(newWaiters, f.waiters[i]) + } + } + f.waiters = newWaiters +} + +// Returns true if After has been called on f but not yet satisfied (so you can +// write race-free tests). 
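+//
+// A minimal race-free test sketch, assuming codeUnderTest is the caller's own
+// function and that it calls After on the injected fake clock:
+//
+//	c := NewFakeClock(time.Now())
+//	go codeUnderTest(c)   // hypothetical; registers a waiter via c.After(...)
+//	for !c.HasWaiters() { // spin until that waiter has been registered
+//	}
+//	c.Step(time.Hour)     // deterministically fire the waiter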
+func (f *FakeClock) HasWaiters() bool { + f.lock.RLock() + defer f.lock.RUnlock() + return len(f.waiters) > 0 +} + +func (f *FakeClock) Sleep(d time.Duration) { + f.Step(d) +} + +// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration +type IntervalClock struct { + Time time.Time + Duration time.Duration +} + +// Now returns i's time. +func (i *IntervalClock) Now() time.Time { + i.Time = i.Time.Add(i.Duration) + return i.Time +} + +// Since returns time since the time in i. +func (i *IntervalClock) Since(ts time.Time) time.Duration { + return i.Time.Sub(ts) +} + +// Unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) After(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement After") +} + +// Unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) NewTimer(d time.Duration) Timer { + panic("IntervalClock doesn't implement NewTimer") +} + +// Unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement Tick") +} + +func (*IntervalClock) Sleep(d time.Duration) { + panic("IntervalClock doesn't implement Sleep") +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var ( + _ = Timer(&realTimer{}) + _ = Timer(&fakeTimer{}) +) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +// fakeTimer implements Timer based on a FakeClock. +type fakeTimer struct { + fakeClock *FakeClock + waiter fakeClockWaiter +} + +// C returns the channel that notifies when this timer has fired. +func (f *fakeTimer) C() <-chan time.Time { + return f.waiter.destChan +} + +// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. +func (f *fakeTimer) Stop() bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters)) + for i := range f.fakeClock.waiters { + w := &f.fakeClock.waiters[i] + if w != &f.waiter { + newWaiters = append(newWaiters, *w) + } + } + + f.fakeClock.waiters = newWaiters + + return !f.waiter.fired +} + +// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet +// fired, or false otherwise. +func (f *fakeTimer) Reset(d time.Duration) bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + active := !f.waiter.fired + + f.waiter.fired = false + f.waiter.targetTime = f.fakeClock.time.Add(d) + + return active +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go new file mode 100644 index 000000000..0f730875e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -0,0 +1,280 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package diff + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "strings" + "text/tabwriter" + + "github.com/davecgh/go-spew/spew" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// StringDiff diffs a and b and returns a human readable diff. +func StringDiff(a, b string) string { + ba := []byte(a) + bb := []byte(b) + out := []byte{} + i := 0 + for ; i < len(ba) && i < len(bb); i++ { + if ba[i] != bb[i] { + break + } + out = append(out, ba[i]) + } + out = append(out, []byte("\n\nA: ")...) + out = append(out, ba[i:]...) + out = append(out, []byte("\n\nB: ")...) + out = append(out, bb[i:]...) + out = append(out, []byte("\n\n")...) + return string(out) +} + +// ObjectDiff writes the two objects out as JSON and prints out the identical part of +// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'. +// For debugging tests. +func ObjectDiff(a, b interface{}) string { + ab, err := json.Marshal(a) + if err != nil { + panic(fmt.Sprintf("a: %v", err)) + } + bb, err := json.Marshal(b) + if err != nil { + panic(fmt.Sprintf("b: %v", err)) + } + return StringDiff(string(ab), string(bb)) +} + +// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects, +// which shows absolutely everything by recursing into every single pointer +// (go's %#v formatters OTOH stop at a certain point). This is needed when you +// can't figure out why reflect.DeepEqual is returning false and nothing is +// showing you differences. This will. 
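+//
+// A small illustrative sketch (the widget type is hypothetical):
+//
+//	type widget struct{ name string }
+//	fmt.Println(ObjectGoPrintDiff(widget{"a"}, widget{"b"}))
+//	// prints the common prefix of the two spew dumps, then the differing
+//	// remainders under "A:" and "B:"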
+func ObjectGoPrintDiff(a, b interface{}) string { + s := spew.ConfigState{DisableMethods: true} + return StringDiff( + s.Sprintf("%#v", a), + s.Sprintf("%#v", b), + ) +} + +func ObjectReflectDiff(a, b interface{}) string { + vA, vB := reflect.ValueOf(a), reflect.ValueOf(b) + if vA.Type() != vB.Type() { + return fmt.Sprintf("type A %T and type B %T do not match", a, b) + } + diffs := objectReflectDiff(field.NewPath("object"), vA, vB) + if len(diffs) == 0 { + return "" + } + out := []string{""} + for _, d := range diffs { + out = append(out, + fmt.Sprintf("%s:", d.path), + limit(fmt.Sprintf(" a: %#v", d.a), 80), + limit(fmt.Sprintf(" b: %#v", d.b), 80), + ) + } + return strings.Join(out, "\n") +} + +func limit(s string, max int) string { + if len(s) > max { + return s[:max] + } + return s +} + +func public(s string) bool { + if len(s) == 0 { + return false + } + return s[:1] == strings.ToUpper(s[:1]) +} + +type diff struct { + path *field.Path + a, b interface{} +} + +type orderedDiffs []diff + +func (d orderedDiffs) Len() int { return len(d) } +func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d orderedDiffs) Less(i, j int) bool { + a, b := d[i].path.String(), d[j].path.String() + if a < b { + return true + } + return false +} + +func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { + switch a.Type().Kind() { + case reflect.Struct: + var changes []diff + for i := 0; i < a.Type().NumField(); i++ { + if !public(a.Type().Field(i).Name) { + if reflect.DeepEqual(a.Interface(), b.Interface()) { + continue + } + return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} + } + if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { + changes = append(changes, sub...) 
+ } else { + if !reflect.DeepEqual(a.Field(i).Interface(), b.Field(i).Interface()) { + changes = append(changes, diff{path: path, a: a.Field(i).Interface(), b: b.Field(i).Interface()}) + } + } + } + return changes + case reflect.Ptr, reflect.Interface: + if a.IsNil() || b.IsNil() { + switch { + case a.IsNil() && b.IsNil(): + return nil + case a.IsNil(): + return []diff{{path: path, a: nil, b: b.Interface()}} + default: + return []diff{{path: path, a: a.Interface(), b: nil}} + } + } + return objectReflectDiff(path, a.Elem(), b.Elem()) + case reflect.Chan: + if !reflect.DeepEqual(a.Interface(), b.Interface()) { + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } + return nil + case reflect.Slice: + lA, lB := a.Len(), b.Len() + l := lA + if lB < lA { + l = lB + } + if lA == lB && lA == 0 { + if a.IsNil() != b.IsNil() { + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } + return nil + } + for i := 0; i < l; i++ { + if !reflect.DeepEqual(a.Index(i), b.Index(i)) { + return objectReflectDiff(path.Index(i), a.Index(i), b.Index(i)) + } + } + var diffs []diff + for i := l; i < lA; i++ { + diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) + } + for i := l; i < lB; i++ { + diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) + } + if len(diffs) == 0 { + diffs = append(diffs, diff{path: path, a: a, b: b}) + } + return diffs + case reflect.Map: + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return nil + } + aKeys := make(map[interface{}]interface{}) + for _, key := range a.MapKeys() { + aKeys[key.Interface()] = a.MapIndex(key).Interface() + } + var missing []diff + for _, key := range b.MapKeys() { + if _, ok := aKeys[key.Interface()]; ok { + delete(aKeys, key.Interface()) + if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) { + continue + } + missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...) + continue + } + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()}) + } + for key, value := range aKeys { + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil}) + } + if len(missing) == 0 { + missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()}) + } + sort.Sort(orderedDiffs(missing)) + return missing + default: + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return nil + } + if !a.CanInterface() { + return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} + } + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } +} + +// ObjectGoPrintSideBySide prints a and b as textual dumps side by side, +// enabling easy visual scanning for mismatches. +func ObjectGoPrintSideBySide(a, b interface{}) string { + s := spew.ConfigState{ + Indent: " ", + // Extra deep spew. 
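+		// DisableMethods makes spew dump struct fields directly instead of
+		// invoking any String()/Error() methods, so nothing is hidden.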
+ DisableMethods: true, + } + sA := s.Sdump(a) + sB := s.Sdump(b) + + linesA := strings.Split(sA, "\n") + linesB := strings.Split(sB, "\n") + width := 0 + for _, s := range linesA { + l := len(s) + if l > width { + width = l + } + } + for _, s := range linesB { + l := len(s) + if l > width { + width = l + } + } + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0) + max := len(linesA) + if len(linesB) > max { + max = len(linesB) + } + for i := 0; i < max; i++ { + var a, b string + if i < len(linesA) { + a = linesA[i] + } + if i < len(linesB) { + b = linesB[i] + } + fmt.Fprintf(w, "%s\t%s\n", a, b) + } + w.Flush() + return buf.String() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go new file mode 100644 index 000000000..5d4d6250a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors implements various utility functions and types around errors. +package errors // import "k8s.io/apimachinery/pkg/util/errors" diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go new file mode 100644 index 000000000..bdea0e16c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -0,0 +1,201 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + "fmt" +) + +// MessagesgCountMap contains occurance for each error message. +type MessageCountMap map[string]int + +// Aggregate represents an object that contains multiple errors, but does not +// necessarily have singular semantic meaning. +type Aggregate interface { + error + Errors() []error +} + +// NewAggregate converts a slice of errors into an Aggregate interface, which +// is itself an implementation of the error interface. If the slice is empty, +// this returns nil. +// It will check if any of the element of input error list is nil, to avoid +// nil pointer panic when call Error(). +func NewAggregate(errlist []error) Aggregate { + if len(errlist) == 0 { + return nil + } + // In case of input error list contains nil + var errs []error + for _, e := range errlist { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// This helper implements the error and Errors interfaces. 
Keeping it private +// prevents people from making an aggregate of 0 errors, which is not +// an error, but does satisfy the error interface. +type aggregate []error + +// Error is part of the error interface. +func (agg aggregate) Error() string { + if len(agg) == 0 { + // This should never happen, really. + return "" + } + if len(agg) == 1 { + return agg[0].Error() + } + result := fmt.Sprintf("[%s", agg[0].Error()) + for i := 1; i < len(agg); i++ { + result += fmt.Sprintf(", %s", agg[i].Error()) + } + result += "]" + return result +} + +// Errors is part of the Aggregate interface. +func (agg aggregate) Errors() []error { + return []error(agg) +} + +// Matcher is used to match errors. Returns true if the error matches. +type Matcher func(error) bool + +// FilterOut removes all errors that match any of the matchers from the input +// error. If the input is a singular error, only that error is tested. If the +// input implements the Aggregate interface, the list of errors will be +// processed recursively. +// +// This can be used, for example, to remove known-OK errors (such as io.EOF or +// os.PathNotFound) from a list of errors. +func FilterOut(err error, fns ...Matcher) error { + if err == nil { + return nil + } + if agg, ok := err.(Aggregate); ok { + return NewAggregate(filterErrors(agg.Errors(), fns...)) + } + if !matchesError(err, fns...) { + return err + } + return nil +} + +// matchesError returns true if any Matcher returns true +func matchesError(err error, fns ...Matcher) bool { + for _, fn := range fns { + if fn(err) { + return true + } + } + return false +} + +// filterErrors returns any errors (or nested errors, if the list contains +// nested Errors) for which all fns return false. If no errors +// remain a nil list is returned. The resulting silec will have all +// nested slices flattened as a side effect. +func filterErrors(list []error, fns ...Matcher) []error { + result := []error{} + for _, err := range list { + r := FilterOut(err, fns...) + if r != nil { + result = append(result, r) + } + } + return result +} + +// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary +// nesting, and flattens them all into a single Aggregate, recursively. +func Flatten(agg Aggregate) Aggregate { + result := []error{} + if agg == nil { + return nil + } + for _, err := range agg.Errors() { + if a, ok := err.(Aggregate); ok { + r := Flatten(a) + if r != nil { + result = append(result, r.Errors()...) + } + } else { + if err != nil { + result = append(result, err) + } + } + } + return NewAggregate(result) +} + +// CreateAggregateFromMessageCountMap converts MessageCountMap Aggregate +func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { + if m == nil { + return nil + } + result := make([]error, 0, len(m)) + for errStr, count := range m { + var countStr string + if count > 1 { + countStr = fmt.Sprintf(" (repeated %v times)", count) + } + result = append(result, fmt.Errorf("%v%v", errStr, countStr)) + } + return NewAggregate(result) +} + +// Reduce will return err or, if err is an Aggregate and only has one item, +// the first item in the aggregate. +func Reduce(err error) error { + if agg, ok := err.(Aggregate); ok && err != nil { + switch len(agg.Errors()) { + case 1: + return agg.Errors()[0] + case 0: + return nil + } + } + return err +} + +// AggregateGoroutines runs the provided functions in parallel, stuffing all +// non-nil errors into the returned Aggregate. +// Returns nil if all the functions complete successfully. 
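+//
+// A brief usage sketch (checkDisk and checkNet stand in for the caller's own checks):
+//
+//	if agg := AggregateGoroutines(checkDisk, checkNet); agg != nil {
+//		for _, err := range agg.Errors() {
+//			fmt.Println(err)
+//		}
+//	}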
+func AggregateGoroutines(funcs ...func() error) Aggregate { + errChan := make(chan error, len(funcs)) + for _, f := range funcs { + go func(f func() error) { errChan <- f() }(f) + } + errs := make([]error, 0) + for i := 0; i < cap(errChan); i++ { + if err := <-errChan; err != nil { + errs = append(errs, err) + } + } + return NewAggregate(errs) +} + +// ErrPreconditionViolated is returned when the precondition is violated +var ErrPreconditionViolated = errors.New("precondition is violated") diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go new file mode 100644 index 000000000..066680f44 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -0,0 +1,167 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package framer implements simple frame decoding techniques for an io.ReadCloser +package framer + +import ( + "encoding/binary" + "encoding/json" + "io" +) + +type lengthDelimitedFrameWriter struct { + w io.Writer + h [4]byte +} + +func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer { + return &lengthDelimitedFrameWriter{w: w} +} + +// Write writes a single frame to the nested writer, prepending it with the length in +// in bytes of data (as a 4 byte, bigendian uint32). +func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) { + binary.BigEndian.PutUint32(w.h[:], uint32(len(data))) + n, err := w.w.Write(w.h[:]) + if err != nil { + return 0, err + } + if n != len(w.h) { + return 0, io.ErrShortWrite + } + return w.w.Write(data) +} + +type lengthDelimitedFrameReader struct { + r io.ReadCloser + remaining int +} + +// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed +// frames off of a stream. +// +// The protocol is: +// +// stream: message ... +// message: prefix body +// prefix: 4 byte uint32 in BigEndian order, denotes length of body +// body: bytes (0..prefix) +// +// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortRead +// will be returned along with the number of bytes read. +func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser { + return &lengthDelimitedFrameReader{r: r} +} + +// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer +// is returned and subsequent calls will attempt to read the last frame. A frame is complete when +// err is nil. 
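+//
+// A hedged sketch of the expected read loop (src is any io.ReadCloser carrying
+// length-delimited frames, handle is the caller's own function):
+//
+//	fr := NewLengthDelimitedFrameReader(src)
+//	buf := make([]byte, 1024)
+//	for {
+//		n, err := fr.Read(buf)
+//		if err == io.ErrShortBuffer {
+//			// buf[:n] is only part of the frame; the next Read continues it
+//			continue
+//		}
+//		if err != nil {
+//			break // io.EOF or a real error
+//		}
+//		handle(buf[:n]) // a complete frame
+//	}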
+func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) { + if r.remaining <= 0 { + header := [4]byte{} + n, err := io.ReadAtLeast(r.r, header[:4], 4) + if err != nil { + return 0, err + } + if n != 4 { + return 0, io.ErrUnexpectedEOF + } + frameLength := int(binary.BigEndian.Uint32(header[:])) + r.remaining = frameLength + } + + expect := r.remaining + max := expect + if max > len(data) { + max = len(data) + } + n, err := io.ReadAtLeast(r.r, data[:max], int(max)) + r.remaining -= n + if err == io.ErrShortBuffer || r.remaining > 0 { + return n, io.ErrShortBuffer + } + if err != nil { + return n, err + } + if n != expect { + return n, io.ErrUnexpectedEOF + } + + return n, nil +} + +func (r *lengthDelimitedFrameReader) Close() error { + return r.r.Close() +} + +type jsonFrameReader struct { + r io.ReadCloser + decoder *json.Decoder + remaining []byte +} + +// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off +// of a wire. +// +// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate +// the read. +func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser { + return &jsonFrameReader{ + r: r, + decoder: json.NewDecoder(r), + } +} + +// ReadFrame decodes the next JSON object in the stream, or returns an error. The returned +// byte slice will be modified the next time ReadFrame is invoked and should not be altered. +func (r *jsonFrameReader) Read(data []byte) (int, error) { + // Return whatever remaining data exists from an in progress frame + if n := len(r.remaining); n > 0 { + if n <= len(data) { + data = append(data[0:0], r.remaining...) + r.remaining = nil + return n, nil + } + + n = len(data) + data = append(data[0:0], r.remaining[:n]...) + r.remaining = r.remaining[n:] + return n, io.ErrShortBuffer + } + + // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see + // data written to data, or be larger than data and a different array. + n := len(data) + m := json.RawMessage(data[:0]) + if err := r.decoder.Decode(&m); err != nil { + return 0, err + } + + // If capacity of data is less than length of the message, decoder will allocate a new slice + // and set m to it, which means we need to copy the partial result back into data and preserve + // the remaining result for subsequent reads. + if len(m) > n { + data = append(data[0:0], m[:n]...) + r.remaining = m[n:] + return n, io.ErrShortBuffer + } + return len(m), nil +} + +func (r *jsonFrameReader) Close() error { + return r.r.Close() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go new file mode 100644 index 000000000..5893df5bd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package httpstream adds multiplexed streaming support to HTTP requests and +// responses via connection upgrades. 
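+//
+// A rough client-side flow, assuming a Dialer implementation (for example the
+// spdy subpackage) has already been constructed as dialer, with protocols being
+// the caller-chosen subprotocol names:
+//
+//	conn, _, err := dialer.Dial(protocols...)
+//	if err != nil {
+//		return err
+//	}
+//	defer conn.Close()
+//	stream, err := conn.CreateStream(http.Header{})
+//	// stream is an io.ReadWriteCloser multiplexed over the upgraded connection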
+package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream" diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go new file mode 100644 index 000000000..7c9b791d4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package httpstream + +import ( + "fmt" + "io" + "net/http" + "strings" + "time" +) + +const ( + HeaderConnection = "Connection" + HeaderUpgrade = "Upgrade" + HeaderProtocolVersion = "X-Stream-Protocol-Version" + HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions" +) + +// NewStreamHandler defines a function that is called when a new Stream is +// received. If no error is returned, the Stream is accepted; otherwise, +// the stream is rejected. After the reply frame has been sent, replySent is closed. +type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error + +// NoOpNewStreamHandler is a stream handler that accepts a new stream and +// performs no other logic. +func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil } + +// Dialer knows how to open a streaming connection to a server. +type Dialer interface { + + // Dial opens a streaming connection to a server using one of the protocols + // specified (in order of most preferred to least preferred). + Dial(protocols ...string) (Connection, string, error) +} + +// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade +// HTTP requests to support multiplexed bidirectional streams. After RoundTrip() +// is invoked, if the upgrade is successful, clients may retrieve the upgraded +// connection by calling UpgradeRoundTripper.Connection(). +type UpgradeRoundTripper interface { + http.RoundTripper + // NewConnection validates the response and creates a new Connection. + NewConnection(resp *http.Response) (Connection, error) +} + +// ResponseUpgrader knows how to upgrade HTTP requests and responses to +// add streaming support to them. +type ResponseUpgrader interface { + // UpgradeResponse upgrades an HTTP response to one that supports multiplexed + // streams. newStreamHandler will be called asynchronously whenever the + // other end of the upgraded connection creates a new stream. + UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection +} + +// Connection represents an upgraded HTTP connection. +type Connection interface { + // CreateStream creates a new Stream with the supplied headers. + CreateStream(headers http.Header) (Stream, error) + // Close resets all streams and closes the connection. + Close() error + // CloseChan returns a channel that is closed when the underlying connection is closed. + CloseChan() <-chan bool + // SetIdleTimeout sets the amount of time the connection may remain idle before + // it is automatically closed. 
+ SetIdleTimeout(timeout time.Duration) +} + +// Stream represents a bidirectional communications channel that is part of an +// upgraded connection. +type Stream interface { + io.ReadWriteCloser + // Reset closes both directions of the stream, indicating that neither client + // or server can use it any more. + Reset() error + // Headers returns the headers used to create the stream. + Headers() http.Header + // Identifier returns the stream's ID. + Identifier() uint32 +} + +// IsUpgradeRequest returns true if the given request is a connection upgrade request +func IsUpgradeRequest(req *http.Request) bool { + for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] { + if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) { + return true + } + } + return false +} + +func negotiateProtocol(clientProtocols, serverProtocols []string) string { + for i := range clientProtocols { + for j := range serverProtocols { + if clientProtocols[i] == serverProtocols[j] { + return clientProtocols[i] + } + } + } + return "" +} + +// Handshake performs a subprotocol negotiation. If the client did request a +// subprotocol, Handshake will select the first common value found in +// serverProtocols. If a match is found, Handshake adds a response header +// indicating the chosen subprotocol. If no match is found, HTTP forbidden is +// returned, along with a response header containing the list of protocols the +// server can accept. +func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) { + clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)] + if len(clientProtocols) == 0 { + // Kube 1.0 clients didn't support subprotocol negotiation. + // TODO require clientProtocols once Kube 1.0 is no longer supported + return "", nil + } + + if len(serverProtocols) == 0 { + // Kube 1.0 servers didn't support subprotocol negotiation. This is mainly for testing. + // TODO require serverProtocols once Kube 1.0 is no longer supported + return "", nil + } + + negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols) + if len(negotiatedProtocol) == 0 { + w.WriteHeader(http.StatusForbidden) + for i := range serverProtocols { + w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i]) + } + fmt.Fprintf(w, "unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols) + return "", fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server supports %v", clientProtocols, serverProtocols) + } + + w.Header().Add(HeaderProtocolVersion, negotiatedProtocol) + return negotiatedProtocol, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go new file mode 100644 index 000000000..3dc8e23ae --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go @@ -0,0 +1,145 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "net" + "net/http" + "sync" + "time" + + "github.com/docker/spdystream" + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/util/httpstream" +) + +// connection maintains state about a spdystream.Connection and its associated +// streams. +type connection struct { + conn *spdystream.Connection + streams []httpstream.Stream + streamLock sync.Mutex + newStreamHandler httpstream.NewStreamHandler +} + +// NewClientConnection creates a new SPDY client connection. +func NewClientConnection(conn net.Conn) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, httpstream.NoOpNewStreamHandler), nil +} + +// NewServerConnection creates a new SPDY server connection. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, newStreamHandler), nil +} + +// newConnection returns a new connection wrapping conn. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection { + c := &connection{conn: conn, newStreamHandler: newStreamHandler} + go conn.Serve(c.newSpdyStream) + return c +} + +// createStreamResponseTimeout indicates how long to wait for the other side to +// acknowledge the new stream before timing out. +const createStreamResponseTimeout = 30 * time.Second + +// Close first sends a reset for all of the connection's streams, and then +// closes the underlying spdystream.Connection. +func (c *connection) Close() error { + c.streamLock.Lock() + for _, s := range c.streams { + // calling Reset instead of Close ensures that all streams are fully torn down + s.Reset() + } + c.streams = make([]httpstream.Stream, 0) + c.streamLock.Unlock() + + // now that all streams are fully torn down, it's safe to call close on the underlying connection, + // which should be able to terminate immediately at this point, instead of waiting for any + // remaining graceful stream termination. + return c.conn.Close() +} + +// CreateStream creates a new stream with the specified headers and registers +// it with the connection. +func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) { + stream, err := c.conn.CreateStream(headers, nil, false) + if err != nil { + return nil, err + } + if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil { + return nil, err + } + + c.registerStream(stream) + return stream, nil +} + +// registerStream adds the stream s to the connection's list of streams that +// it owns. +func (c *connection) registerStream(s httpstream.Stream) { + c.streamLock.Lock() + c.streams = append(c.streams, s) + c.streamLock.Unlock() +} + +// CloseChan returns a channel that, when closed, indicates that the underlying +// spdystream.Connection has been closed. +func (c *connection) CloseChan() <-chan bool { + return c.conn.CloseChan() +} + +// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve. 
+// It calls connection's newStreamHandler, giving it the opportunity to accept or reject +// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the +// stream is accepted and registered with the connection. +func (c *connection) newSpdyStream(stream *spdystream.Stream) { + replySent := make(chan struct{}) + err := c.newStreamHandler(stream, replySent) + rejectStream := (err != nil) + if rejectStream { + glog.Warningf("Stream rejected: %v", err) + stream.Reset() + return + } + + c.registerStream(stream) + stream.SendReply(http.Header{}, rejectStream) + close(replySent) +} + +// SetIdleTimeout sets the amount of time the connection may remain idle before +// it is automatically closed. +func (c *connection) SetIdleTimeout(timeout time.Duration) { + c.conn.SetIdleTimeout(timeout) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go new file mode 100644 index 000000000..ad300e28b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -0,0 +1,322 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/httpstream" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/third_party/forked/golang/netutil" +) + +// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports +// multiplexed streams. After RoundTrip() is invoked, Conn will be set +// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface. +type SpdyRoundTripper struct { + //tlsConfig holds the TLS configuration settings to use when connecting + //to the remote server. + tlsConfig *tls.Config + + /* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper + must be safe for use by multiple concurrent goroutines. If this is absolutely + necessary, we could keep a map from http.Request to net.Conn. In practice, + a client will create an http.Client, set the transport to a new insteace of + SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue. + */ + // conn is the underlying network connection to the remote server. + conn net.Conn + + // Dialer is the dialer used to connect. Used if non-nil. + Dialer *net.Dialer + + // proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment + // Used primarily for mocking the proxy discovery in tests. + proxier func(req *http.Request) (*url.URL, error) + + // followRedirects indicates if the round tripper should examine responses for redirects and + // follow them. 
+ followRedirects bool +} + +var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{} +var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{} +var _ utilnet.Dialer = &SpdyRoundTripper{} + +// NewRoundTripper creates a new SpdyRoundTripper that will use +// the specified tlsConfig. +func NewRoundTripper(tlsConfig *tls.Config, followRedirects bool) httpstream.UpgradeRoundTripper { + return NewSpdyRoundTripper(tlsConfig, followRedirects) +} + +// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use +// the specified tlsConfig. This function is mostly meant for unit tests. +func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects bool) *SpdyRoundTripper { + return &SpdyRoundTripper{tlsConfig: tlsConfig, followRedirects: followRedirects} +} + +// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during +// proxying with a spdy roundtripper. +func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config { + return s.tlsConfig +} + +// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer. +func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) { + conn, err := s.dial(req) + if err != nil { + return nil, err + } + + if err := req.Write(conn); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +// dial dials the host specified by req, using TLS if appropriate, optionally +// using a proxy server if one is configured via environment variables. +func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) { + proxier := s.proxier + if proxier == nil { + proxier = http.ProxyFromEnvironment + } + proxyURL, err := proxier(req) + if err != nil { + return nil, err + } + + if proxyURL == nil { + return s.dialWithoutProxy(req.URL) + } + + // ensure we use a canonical host with proxyReq + targetHost := netutil.CanonicalAddr(req.URL) + + // proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support + proxyReq := http.Request{ + Method: "CONNECT", + URL: &url.URL{}, + Host: targetHost, + } + + if pa := s.proxyAuth(proxyURL); pa != "" { + proxyReq.Header = http.Header{} + proxyReq.Header.Set("Proxy-Authorization", pa) + } + + proxyDialConn, err := s.dialWithoutProxy(proxyURL) + if err != nil { + return nil, err + } + + proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil) + _, err = proxyClientConn.Do(&proxyReq) + if err != nil && err != httputil.ErrPersistEOF { + return nil, err + } + + rwc, _ := proxyClientConn.Hijack() + + if req.URL.Scheme != "https" { + return rwc, nil + } + + host, _, err := net.SplitHostPort(targetHost) + if err != nil { + return nil, err + } + + if s.tlsConfig == nil { + s.tlsConfig = &tls.Config{} + } + + if len(s.tlsConfig.ServerName) == 0 { + s.tlsConfig.ServerName = host + } + + tlsConn := tls.Client(rwc, s.tlsConfig) + + // need to manually call Handshake() so we can call VerifyHostname() below + if err := tlsConn.Handshake(); err != nil { + return nil, err + } + + // Return if we were configured to skip validation + if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify { + return tlsConn, nil + } + + if err := tlsConn.VerifyHostname(host); err != nil { + return nil, err + } + + return tlsConn, nil +} + +// dialWithoutProxy dials the host specified by url, using TLS if appropriate. 
+func (s *SpdyRoundTripper) dialWithoutProxy(url *url.URL) (net.Conn, error) { + dialAddr := netutil.CanonicalAddr(url) + + if url.Scheme == "http" { + if s.Dialer == nil { + return net.Dial("tcp", dialAddr) + } else { + return s.Dialer.Dial("tcp", dialAddr) + } + } + + // TODO validate the TLSClientConfig is set up? + var conn *tls.Conn + var err error + if s.Dialer == nil { + conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig) + } else { + conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig) + } + if err != nil { + return nil, err + } + + // Return if we were configured to skip validation + if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify { + return conn, nil + } + + host, _, err := net.SplitHostPort(dialAddr) + if err != nil { + return nil, err + } + err = conn.VerifyHostname(host) + if err != nil { + return nil, err + } + + return conn, nil +} + +// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header +func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string { + if proxyURL == nil || proxyURL.User == nil { + return "" + } + credentials := proxyURL.User.String() + encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials)) + return fmt.Sprintf("Basic %s", encodedAuth) +} + +// RoundTrip executes the Request and upgrades it. After a successful upgrade, +// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded +// connection. +func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + header := utilnet.CloneHeader(req.Header) + header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade) + header.Add(httpstream.HeaderUpgrade, HeaderSpdy31) + + var ( + conn net.Conn + rawResponse []byte + err error + ) + + if s.followRedirects { + conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s) + } else { + clone := utilnet.CloneRequest(req) + clone.Header = header + conn, err = s.Dial(clone) + } + if err != nil { + return nil, err + } + + responseReader := bufio.NewReader( + io.MultiReader( + bytes.NewBuffer(rawResponse), + conn, + ), + ) + + resp, err := http.ReadResponse(responseReader, nil) + if err != nil { + if conn != nil { + conn.Close() + } + return nil, err + } + + s.conn = conn + + return resp, nil +} + +// NewConnection validates the upgrade response, creating and returning a new +// httpstream.Connection if there were no errors. 
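+//
+// A typical pairing with RoundTrip, sketched under the assumption that req is an
+// *http.Request aimed at a server that accepts the SPDY/3.1 upgrade and that
+// tlsConfig is the caller's *tls.Config:
+//
+//	rt := NewRoundTripper(tlsConfig, false)
+//	resp, err := rt.RoundTrip(req)
+//	if err != nil {
+//		return nil, err
+//	}
+//	conn, err := rt.NewConnection(resp)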
+func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) { + connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection)) + upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade)) + if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { + defer resp.Body.Close() + responseError := "" + responseErrorBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + responseError = "unable to read error from server response" + } else { + // TODO: I don't belong here, I should be abstracted from this class + if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil { + if status, ok := obj.(*metav1.Status); ok { + return nil, &apierrors.StatusError{ErrStatus: *status} + } + } + responseError = string(responseErrorBytes) + responseError = strings.TrimSpace(responseError) + } + + return nil, fmt.Errorf("unable to upgrade connection: %s", responseError) + } + + return NewClientConnection(s.conn) +} + +// statusScheme is private scheme for the decoding here until someone fixes the TODO in NewConnection +var statusScheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. +var statusCodecs = serializer.NewCodecFactory(statusScheme) + +func init() { + statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion, + &metav1.Status{}, + ) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go new file mode 100644 index 000000000..13353988f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go @@ -0,0 +1,107 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "bufio" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/runtime" +) + +const HeaderSpdy31 = "SPDY/3.1" + +// responseUpgrader knows how to upgrade HTTP responses. It +// implements the httpstream.ResponseUpgrader interface. +type responseUpgrader struct { +} + +// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All +// calls will be handled directly by the underlying net.Conn with the exception +// of Read and Close calls, which will consider data in the bufio.Reader. This +// ensures that data already inside the used bufio.Reader instance is also +// read. 
+type connWrapper struct { + net.Conn + closed int32 + bufReader *bufio.Reader +} + +func (w *connWrapper) Read(b []byte) (n int, err error) { + if atomic.LoadInt32(&w.closed) == 1 { + return 0, io.EOF + } + return w.bufReader.Read(b) +} + +func (w *connWrapper) Close() error { + err := w.Conn.Close() + atomic.StoreInt32(&w.closed, 1) + return err +} + +// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is +// capable of upgrading HTTP responses using SPDY/3.1 via the +// spdystream package. +func NewResponseUpgrader() httpstream.ResponseUpgrader { + return responseUpgrader{} +} + +// UpgradeResponse upgrades an HTTP response to one that supports multiplexed +// streams. newStreamHandler will be called synchronously whenever the +// other end of the upgraded connection creates a new stream. +func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection { + connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection)) + upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade)) + if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "unable to upgrade: missing upgrade headers in request: %#v", req.Header) + return nil + } + + hijacker, ok := w.(http.Hijacker) + if !ok { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "unable to upgrade: unable to hijack response") + return nil + } + + w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade) + w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31) + w.WriteHeader(http.StatusSwitchingProtocols) + + conn, bufrw, err := hijacker.Hijack() + if err != nil { + runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err)) + return nil + } + + connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader} + spdyConn, err := NewServerConnection(connWithBuf, newStreamHandler) + if err != nil { + runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err)) + return nil + } + + return spdyConn +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go new file mode 100644 index 000000000..433dfa5cd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -0,0 +1,381 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +// DO NOT EDIT! + +/* + Package intstr is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto + + It has these top-level messages: + IntOrString +*/ +package intstr + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func init() { + proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") +} +func (m *IntOrString) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) + i += copy(dAtA[i:], m.StrVal) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *IntOrString) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Type)) + n += 1 + sovGenerated(uint64(m.IntVal)) + l = len(m.StrVal) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *IntOrString) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 
1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) + } + m.IntVal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntVal |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StrVal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found 
during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, + 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, + 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, + 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, + 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, + 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, + 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, + 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, + 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, + 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, + 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, + 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, + 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, + 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, + 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, + 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, + 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, + 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, + 0x64, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto new file mode 100644 index 000000000..cccaf6f68 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.util.intstr; + +// Package-wide variables from generator "generated". +option go_package = "intstr"; + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. 
+// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +message IntOrString { + optional int64 type = 1; + + optional int32 intVal = 2; + + optional string strVal = 3; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go new file mode 100644 index 000000000..02586b348 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -0,0 +1,177 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package intstr + +import ( + "encoding/json" + "fmt" + "math" + "runtime/debug" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/openapi" + + "github.com/go-openapi/spec" + "github.com/golang/glog" + "github.com/google/gofuzz" +) + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. +// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +type IntOrString struct { + Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"` + IntVal int32 `protobuf:"varint,2,opt,name=intVal"` + StrVal string `protobuf:"bytes,3,opt,name=strVal"` +} + +// Type represents the stored type of IntOrString. +type Type int + +const ( + Int Type = iota // The IntOrString holds an int. + String // The IntOrString holds a string. +) + +// FromInt creates an IntOrString object with an int32 value. It is +// your responsibility not to call this method with a value greater +// than int32. +// TODO: convert to (val int32) +func FromInt(val int) IntOrString { + if val > math.MaxInt32 || val < math.MinInt32 { + glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + } + return IntOrString{Type: Int, IntVal: int32(val)} +} + +// FromString creates an IntOrString object with a string value. +func FromString(val string) IntOrString { + return IntOrString{Type: String, StrVal: val} +} + +// Parse the given string and try to convert it to an integer before +// setting it as a string value. +func Parse(val string) IntOrString { + i, err := strconv.Atoi(val) + if err != nil { + return FromString(val) + } + return FromInt(i) +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (intstr *IntOrString) UnmarshalJSON(value []byte) error { + if value[0] == '"' { + intstr.Type = String + return json.Unmarshal(value, &intstr.StrVal) + } + intstr.Type = Int + return json.Unmarshal(value, &intstr.IntVal) +} + +// String returns the string value, or the Itoa of the int value. +func (intstr *IntOrString) String() string { + if intstr.Type == String { + return intstr.StrVal + } + return strconv.Itoa(intstr.IntValue()) +} + +// IntValue returns the IntVal if type Int, or if +// it is a String, will attempt a conversion to int. 
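A short sketch of how IntOrString behaves when embedded in a JSON-serialized struct; the probe type below is illustrative only.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

type probe struct {
	Port intstr.IntOrString `json:"port"`
}

func main() {
	// Marshalling keeps the inner type: ints stay JSON numbers, strings stay JSON strings.
	a, _ := json.Marshal(probe{Port: intstr.FromInt(8080)})
	b, _ := json.Marshal(probe{Port: intstr.FromString("https")})
	fmt.Println(string(a), string(b)) // {"port":8080} {"port":"https"}

	// Unmarshalling looks at the first byte to decide between Int and String.
	var p probe
	_ = json.Unmarshal([]byte(`{"port":"https"}`), &p)
	fmt.Println(p.Port.Type == intstr.String, p.Port.String()) // true https
}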
+func (intstr *IntOrString) IntValue() int { + if intstr.Type == String { + i, _ := strconv.Atoi(intstr.StrVal) + return i + } + return int(intstr.IntVal) +} + +// MarshalJSON implements the json.Marshaller interface. +func (intstr IntOrString) MarshalJSON() ([]byte, error) { + switch intstr.Type { + case Int: + return json.Marshal(intstr.IntVal) + case String: + return json.Marshal(intstr.StrVal) + default: + return []byte{}, fmt.Errorf("impossible IntOrString.Type") + } +} + +func (_ IntOrString) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "int-or-string", + }, + }, + } +} + +func (intstr *IntOrString) Fuzz(c fuzz.Continue) { + if intstr == nil { + return + } + if c.RandBool() { + intstr.Type = Int + c.Fuzz(&intstr.IntVal) + intstr.StrVal = "" + } else { + intstr.Type = String + intstr.IntVal = 0 + c.Fuzz(&intstr.StrVal) + } +} + +func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { + value, isPercent, err := getIntOrPercentValue(intOrPercent) + if err != nil { + return 0, fmt.Errorf("invalid value for IntOrString: %v", err) + } + if isPercent { + if roundUp { + value = int(math.Ceil(float64(value) * (float64(total)) / 100)) + } else { + value = int(math.Floor(float64(value) * (float64(total)) / 100)) + } + } + return value, nil +} + +func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { + switch intOrStr.Type { + case Int: + return intOrStr.IntValue(), false, nil + case String: + s := strings.Replace(intOrStr.StrVal, "%", "", -1) + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err) + } + return int(v), true, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go new file mode 100644 index 000000000..e8054a12e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -0,0 +1,107 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
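A sketch of GetValueFromIntOrPercent above, which resolves percentage-valued fields against a total and rounds in the requested direction.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Plain integers pass through unchanged.
	maxUnavailable := intstr.FromInt(2)
	v, _ := intstr.GetValueFromIntOrPercent(&maxUnavailable, 10, true)
	fmt.Println(v) // 2

	// Percentages are resolved against the total; roundUp picks Ceil over Floor.
	maxSurge := intstr.FromString("25%")
	up, _ := intstr.GetValueFromIntOrPercent(&maxSurge, 10, true)
	down, _ := intstr.GetValueFromIntOrPercent(&maxSurge, 10, false)
	fmt.Println(up, down) // 3 2
}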
+*/ + +package json + +import ( + "bytes" + "encoding/json" + "io" +) + +// NewEncoder delegates to json.NewEncoder +// It is only here so this package can be a drop-in for common encoding/json uses +func NewEncoder(w io.Writer) *json.Encoder { + return json.NewEncoder(w) +} + +// Marshal delegates to json.Marshal +// It is only here so this package can be a drop-in for common encoding/json uses +func Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals the given data +// If v is a *map[string]interface{}, numbers are converted to int64 or float64 +func Unmarshal(data []byte, v interface{}) error { + switch v := v.(type) { + case *map[string]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertMapNumbers(*v) + + default: + return json.Unmarshal(data, v) + } +} + +// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertMapNumbers(m map[string]interface{}) error { + var err error + for k, v := range m { + switch v := v.(type) { + case json.Number: + m[k], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v) + case []interface{}: + err = convertSliceNumbers(v) + } + if err != nil { + return err + } + } + return nil +} + +// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertSliceNumbers(s []interface{}) error { + var err error + for i, v := range s { + switch v := v.(type) { + case json.Number: + s[i], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v) + case []interface{}: + err = convertSliceNumbers(v) + } + if err != nil { + return err + } + } + return nil +} + +// convertNumber converts a json.Number to an int64 or float64, or returns an error +func convertNumber(n json.Number) (interface{}, error) { + // Attempt to convert to an int64 first + if i, err := n.Int64(); err == nil { + return i, nil + } + // Return a float64 (default json.Decode() behavior) + // An overflow will return an error + return n.Float64() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go new file mode 100644 index 000000000..adb80813b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -0,0 +1,401 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
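A sketch of what this json wrapper changes relative to encoding/json: when decoding into a generic map, whole numbers come back as int64 rather than float64.

package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	data := []byte(`{"replicas": 3, "ratio": 0.5}`)

	out := map[string]interface{}{}
	if err := utiljson.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%T %T\n", out["replicas"], out["ratio"]) // int64 float64
}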
+*/ + +package net + +import ( + "bufio" + "bytes" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + + "github.com/golang/glog" + "golang.org/x/net/http2" +) + +// IsProbableEOF returns true if the given error resembles a connection termination +// scenario that would justify assuming that the watch is empty. +// These errors are what the Go http stack returns back to us which are general +// connection closure errors (strongly correlated) and callers that need to +// differentiate probable errors in connection behavior between normal "this is +// disconnected" should use the method. +func IsProbableEOF(err error) bool { + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + switch { + case err == io.EOF: + return true + case err.Error() == "http: can't write HTTP request on broken connection": + return true + case strings.Contains(err.Error(), "connection reset by peer"): + return true + case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"): + return true + } + return false +} + +var defaultTransport = http.DefaultTransport.(*http.Transport) + +// SetOldTransportDefaults applies the defaults from http.DefaultTransport +// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset +func SetOldTransportDefaults(t *http.Transport) *http.Transport { + if t.Proxy == nil || isDefault(t.Proxy) { + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + if t.Dial == nil { + t.Dial = defaultTransport.Dial + } + if t.TLSHandshakeTimeout == 0 { + t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout + } + return t +} + +// SetTransportDefaults applies the defaults from http.DefaultTransport +// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset +func SetTransportDefaults(t *http.Transport) *http.Transport { + t = SetOldTransportDefaults(t) + // Allow clients to disable http2 if needed. 
+ if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { + glog.Infof("HTTP2 has been explicitly disabled") + } else { + if err := http2.ConfigureTransport(t); err != nil { + glog.Warningf("Transport failed http2 configuration: %v", err) + } + } + return t +} + +type RoundTripperWrapper interface { + http.RoundTripper + WrappedRoundTripper() http.RoundTripper +} + +type DialFunc func(net, addr string) (net.Conn, error) + +func DialerFor(transport http.RoundTripper) (DialFunc, error) { + if transport == nil { + return nil, nil + } + + switch transport := transport.(type) { + case *http.Transport: + return transport.Dial, nil + case RoundTripperWrapper: + return DialerFor(transport.WrappedRoundTripper()) + default: + return nil, fmt.Errorf("unknown transport type: %v", transport) + } +} + +type TLSClientConfigHolder interface { + TLSClientConfig() *tls.Config +} + +func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { + if transport == nil { + return nil, nil + } + + switch transport := transport.(type) { + case *http.Transport: + return transport.TLSClientConfig, nil + case TLSClientConfigHolder: + return transport.TLSClientConfig(), nil + case RoundTripperWrapper: + return TLSClientConfig(transport.WrappedRoundTripper()) + default: + return nil, fmt.Errorf("unknown transport type: %v", transport) + } +} + +func FormatURL(scheme string, host string, port int, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(host, strconv.Itoa(port)), + Path: path, + } +} + +func GetHTTPClient(req *http.Request) string { + if userAgent, ok := req.Header["User-Agent"]; ok { + if len(userAgent) > 0 { + return userAgent[0] + } + } + return "unknown" +} + +// SourceIPs splits the comma separated X-Forwarded-For header or returns the X-Real-Ip header or req.RemoteAddr, +// in that order, ignoring invalid IPs. It returns nil if all of these are empty or invalid. +func SourceIPs(req *http.Request) []net.IP { + hdr := req.Header + // First check the X-Forwarded-For header for requests via proxy. + hdrForwardedFor := hdr.Get("X-Forwarded-For") + forwardedForIPs := []net.IP{} + if hdrForwardedFor != "" { + // X-Forwarded-For can be a csv of IPs in case of multiple proxies. + // Use the first valid one. + parts := strings.Split(hdrForwardedFor, ",") + for _, part := range parts { + ip := net.ParseIP(strings.TrimSpace(part)) + if ip != nil { + forwardedForIPs = append(forwardedForIPs, ip) + } + } + } + if len(forwardedForIPs) > 0 { + return forwardedForIPs + } + + // Try the X-Real-Ip header. + hdrRealIp := hdr.Get("X-Real-Ip") + if hdrRealIp != "" { + ip := net.ParseIP(hdrRealIp) + if ip != nil { + return []net.IP{ip} + } + } + + // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy. + // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err == nil { + if remoteIP := net.ParseIP(host); remoteIP != nil { + return []net.IP{remoteIP} + } + } + + // Fallback if Remote Address was just IP. + if remoteIP := net.ParseIP(req.RemoteAddr); remoteIP != nil { + return []net.IP{remoteIP} + } + + return nil +} + +// Extracts and returns the clients IP from the given request. +// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order. +// Returns nil if none of them are set or is set to an invalid value. 
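A sketch of typical call sites for the transport and client-IP helpers in this file; the client and handler below are illustrative.

package main

import (
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// Start from an empty transport and let the helper fill in proxy, dialer,
	// TLS handshake timeout and (unless DISABLE_HTTP2 is set) HTTP/2 support.
	client := &http.Client{Transport: utilnet.SetTransportDefaults(&http.Transport{})}
	_ = client

	// Server side: recover the caller's IP from X-Forwarded-For, X-Real-Ip or
	// RemoteAddr, in that order, plus the User-Agent via GetHTTPClient.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "hello %s (%s)\n", utilnet.GetClientIP(r), utilnet.GetHTTPClient(r))
	})
	_ = handler
}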
+func GetClientIP(req *http.Request) net.IP { + ips := SourceIPs(req) + if len(ips) == 0 { + return nil + } + return ips[0] +} + +// Prepares the X-Forwarded-For header for another forwarding hop by appending the previous sender's +// IP address to the X-Forwarded-For chain. +func AppendForwardedForHeader(req *http.Request) { + // Copied from net/http/httputil/reverseproxy.go: + if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { + // If we aren't the first proxy retain prior + // X-Forwarded-For information as a comma+space + // separated list and fold multiple headers into one. + if prior, ok := req.Header["X-Forwarded-For"]; ok { + clientIP = strings.Join(prior, ", ") + ", " + clientIP + } + req.Header.Set("X-Forwarded-For", clientIP) + } +} + +var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment) + +// isDefault checks to see if the transportProxierFunc is pointing to the default one +func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { + transportProxierPointer := fmt.Sprintf("%p", transportProxier) + return transportProxierPointer == defaultProxyFuncPointer +} + +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + host := req.URL.Host + // for some urls, the Host is already the host, not the host:port + if net.ParseIP(host) == nil { + var err error + host, _, err = net.SplitHostPort(req.URL.Host) + if err != nil { + return delegate(req) + } + } + + ip := net.ParseIP(host) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } +} + +// Dialer dials a host and writes a request to it. +type Dialer interface { + // Dial connects to the host specified by req's URL, writes the request to the connection, and + // returns the opened net.Conn. + Dial(req *http.Request) (net.Conn, error) +} + +// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to +// originalLocation). It returns the opened net.Conn and the raw response bytes. 
+func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer) (net.Conn, []byte, error) { + const ( + maxRedirects = 10 + maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers + ) + + var ( + location = originalLocation + method = originalMethod + intermediateConn net.Conn + rawResponse = bytes.NewBuffer(make([]byte, 0, 256)) + body = originalBody + ) + + defer func() { + if intermediateConn != nil { + intermediateConn.Close() + } + }() + +redirectLoop: + for redirects := 0; ; redirects++ { + if redirects > maxRedirects { + return nil, nil, fmt.Errorf("too many redirects (%d)", redirects) + } + + req, err := http.NewRequest(method, location.String(), body) + if err != nil { + return nil, nil, err + } + + req.Header = header + + intermediateConn, err = dialer.Dial(req) + if err != nil { + return nil, nil, err + } + + // Peek at the backend response. + rawResponse.Reset() + respReader := bufio.NewReader(io.TeeReader( + io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes. + rawResponse)) // Save the raw response. + resp, err := http.ReadResponse(respReader, nil) + if err != nil { + // Unable to read the backend response; let the client handle it. + glog.Warningf("Error reading backend response: %v", err) + break redirectLoop + } + + switch resp.StatusCode { + case http.StatusFound: + // Redirect, continue. + default: + // Don't redirect. + break redirectLoop + } + + // Redirected requests switch to "GET" according to the HTTP spec: + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3 + method = "GET" + // don't send a body when following redirects + body = nil + + resp.Body.Close() // not used + + // Reset the connection. + intermediateConn.Close() + intermediateConn = nil + + // Prepare to follow the redirect. + redirectStr := resp.Header.Get("Location") + if redirectStr == "" { + return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode) + } + // We have to parse relative to the current location, NOT originalLocation. For example, + // if we request http://foo.com/a and get back "http://bar.com/b", the result should be + // http://bar.com/b. If we then make that request and get back a redirect to "/c", the result + // should be http://bar.com/c, not http://foo.com/c. + location, err = location.Parse(redirectStr) + if err != nil { + return nil, nil, fmt.Errorf("malformed Location header: %v", err) + } + } + + connToReturn := intermediateConn + intermediateConn = nil // Don't close the connection when we return it. + return connToReturn, rawResponse.Bytes(), nil +} + +// CloneRequest creates a shallow copy of the request along with a deep copy of the Headers. +func CloneRequest(req *http.Request) *http.Request { + r := new(http.Request) + + // shallow clone + *r = *req + + // deep copy headers + r.Header = CloneHeader(req.Header) + + return r +} + +// CloneHeader creates a deep copy of an http.Header. 
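A sketch showing the point of CloneRequest/CloneHeader: the copy gets its own header map, so mutating it does not touch the original request.

package main

import (
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	orig, _ := http.NewRequest("GET", "http://example.com/", nil)
	orig.Header.Set("Accept", "application/json")

	clone := utilnet.CloneRequest(orig)
	clone.Header.Set("Accept", "text/plain")

	fmt.Println(orig.Header.Get("Accept"), clone.Header.Get("Accept")) // application/json text/plain
}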
+func CloneHeader(in http.Header) http.Header { + out := make(http.Header, len(in)) + for key, values := range in { + newValues := make([]string, len(values)) + copy(newValues, values) + out[key] = newValues + } + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go new file mode 100644 index 000000000..a1e53d2e4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -0,0 +1,278 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + + "strings" + + "github.com/golang/glog" +) + +type Route struct { + Interface string + Destination net.IP + Gateway net.IP + // TODO: add more fields here if needed +} + +func getRoutes(input io.Reader) ([]Route, error) { + routes := []Route{} + if input == nil { + return nil, fmt.Errorf("input is nil") + } + scanner := bufio.NewReader(input) + for { + line, err := scanner.ReadString('\n') + if err == io.EOF { + break + } + //ignore the headers in the route info + if strings.HasPrefix(line, "Iface") { + continue + } + fields := strings.Fields(line) + routes = append(routes, Route{}) + route := &routes[len(routes)-1] + route.Interface = fields[0] + ip, err := parseIP(fields[1]) + if err != nil { + return nil, err + } + route.Destination = ip + ip, err = parseIP(fields[2]) + if err != nil { + return nil, err + } + route.Gateway = ip + } + return routes, nil +} + +func parseIP(str string) (net.IP, error) { + if str == "" { + return nil, fmt.Errorf("input is nil") + } + bytes, err := hex.DecodeString(str) + if err != nil { + return nil, err + } + //TODO add ipv6 support + if len(bytes) != net.IPv4len { + return nil, fmt.Errorf("only IPv4 is supported") + } + bytes[0], bytes[1], bytes[2], bytes[3] = bytes[3], bytes[2], bytes[1], bytes[0] + return net.IP(bytes), nil +} + +func isInterfaceUp(intf *net.Interface) bool { + if intf == nil { + return false + } + if intf.Flags&net.FlagUp != 0 { + glog.V(4).Infof("Interface %v is up", intf.Name) + return true + } + return false +} + +//getFinalIP method receives all the IP addrs of a Interface +//and returns a nil if the address is Loopback, Ipv6, link-local or nil. +//It returns a valid IPv4 if an Ipv4 address is found in the array. 
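An in-package sketch of the /proc/net/route format getRoutes expects: whitespace-separated columns with little-endian hex IPs, and a header line (starting with "Iface") that is skipped. exampleGetRoutes and the sample table are hypothetical and rely on this file's existing imports.

func exampleGetRoutes() {
	routeTable := "Iface\tDestination\tGateway\tFlags\n" +
		"eth0\t00000000\t0101A8C0\t0003\n"
	routes, err := getRoutes(strings.NewReader(routeTable))
	if err != nil {
		panic(err)
	}
	// routes[0].Interface == "eth0"
	// routes[0].Destination.String() == "0.0.0.0" (default route)
	// routes[0].Gateway.String() == "192.168.1.1" (hex 0101A8C0, byte-reversed)
	_ = routes
}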
+func getFinalIP(addrs []net.Addr) (net.IP, error) { + if len(addrs) > 0 { + for i := range addrs { + glog.V(4).Infof("Checking addr %s.", addrs[i].String()) + ip, _, err := net.ParseCIDR(addrs[i].String()) + if err != nil { + return nil, err + } + //Only IPv4 + //TODO : add IPv6 support + if ip.To4() != nil { + if !ip.IsLoopback() && !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { + glog.V(4).Infof("IP found %v", ip) + return ip, nil + } else { + glog.V(4).Infof("Loopback/link-local found %v", ip) + } + } else { + glog.V(4).Infof("%v is not a valid IPv4 address", ip) + } + + } + } + return nil, nil +} + +func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) { + intf, err := nw.InterfaceByName(intfName) + if err != nil { + return nil, err + } + if isInterfaceUp(intf) { + addrs, err := nw.Addrs(intf) + if err != nil { + return nil, err + } + glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) + finalIP, err := getFinalIP(addrs) + if err != nil { + return nil, err + } + if finalIP != nil { + glog.V(4).Infof("valid IPv4 address for interface %q found as %v.", intfName, finalIP) + return finalIP, nil + } + } + + return nil, nil +} + +func flagsSet(flags net.Flags, test net.Flags) bool { + return flags&test != 0 +} + +func flagsClear(flags net.Flags, test net.Flags) bool { + return flags&test == 0 +} + +func chooseHostInterfaceNativeGo() (net.IP, error) { + intfs, err := net.Interfaces() + if err != nil { + return nil, err + } + i := 0 + var ip net.IP + for i = range intfs { + if flagsSet(intfs[i].Flags, net.FlagUp) && flagsClear(intfs[i].Flags, net.FlagLoopback|net.FlagPointToPoint) { + addrs, err := intfs[i].Addrs() + if err != nil { + return nil, err + } + if len(addrs) > 0 { + for _, addr := range addrs { + if addrIP, _, err := net.ParseCIDR(addr.String()); err == nil { + if addrIP.To4() != nil { + ip = addrIP.To4() + if !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { + break + } + } + } + } + if ip != nil { + // This interface should suffice. + break + } + } + } + } + if ip == nil { + return nil, fmt.Errorf("no acceptable interface from host") + } + glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip) + return ip, nil +} + +//ChooseHostInterface is a method used fetch an IP for a daemon. +//It uses data from /proc/net/route file. +//For a node with no internet connection ,it returns error +//For a multi n/w interface node it returns the IP of the interface with gateway on it. 
+func ChooseHostInterface() (net.IP, error) { + inFile, err := os.Open("/proc/net/route") + if err != nil { + if os.IsNotExist(err) { + return chooseHostInterfaceNativeGo() + } + return nil, err + } + defer inFile.Close() + var nw networkInterfacer = networkInterface{} + return chooseHostInterfaceFromRoute(inFile, nw) +} + +type networkInterfacer interface { + InterfaceByName(intfName string) (*net.Interface, error) + Addrs(intf *net.Interface) ([]net.Addr, error) +} + +type networkInterface struct{} + +func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) { + intf, err := net.InterfaceByName(intfName) + if err != nil { + return nil, err + } + return intf, nil +} + +func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { + addrs, err := intf.Addrs() + if err != nil { + return nil, err + } + return addrs, nil +} + +func chooseHostInterfaceFromRoute(inFile io.Reader, nw networkInterfacer) (net.IP, error) { + routes, err := getRoutes(inFile) + if err != nil { + return nil, err + } + zero := net.IP{0, 0, 0, 0} + var finalIP net.IP + for i := range routes { + //find interface with gateway + if routes[i].Destination.Equal(zero) { + glog.V(4).Infof("Default route transits interface %q", routes[i].Interface) + finalIP, err := getIPFromInterface(routes[i].Interface, nw) + if err != nil { + return nil, err + } + if finalIP != nil { + glog.V(4).Infof("Choosing IP %v ", finalIP) + return finalIP, nil + } + } + } + glog.V(4).Infof("No valid IP found") + if finalIP == nil { + return nil, fmt.Errorf("Unable to select an IP.") + } + return nil, nil +} + +// If bind-address is usable, return it directly +// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default +// interface. +func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { + if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() { + hostIP, err := ChooseHostInterface() + if err != nil { + return nil, err + } + bindAddress = hostIP + } + return bindAddress, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go new file mode 100644 index 000000000..6a50e6186 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go @@ -0,0 +1,113 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "strconv" + "strings" +) + +// PortRange represents a range of TCP/UDP ports. To represent a single port, +// set Size to 1. +type PortRange struct { + Base int + Size int +} + +// Contains tests whether a given port falls within the PortRange. +func (pr *PortRange) Contains(p int) bool { + return (p >= pr.Base) && ((p - pr.Base) < pr.Size) +} + +// String converts the PortRange to a string representation, which can be +// parsed by PortRange.Set or ParsePortRange. 
+func (pr PortRange) String() string { + if pr.Size == 0 { + return "" + } + return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1) +} + +// Set parses a string of the form "min-max", inclusive at both ends, and +// sets the PortRange from it. This is part of the flag.Value and pflag.Value +// interfaces. +func (pr *PortRange) Set(value string) error { + value = strings.TrimSpace(value) + + // TODO: Accept "80" syntax + // TODO: Accept "80+8" syntax + + if value == "" { + pr.Base = 0 + pr.Size = 0 + return nil + } + + hyphenIndex := strings.Index(value, "-") + if hyphenIndex == -1 { + return fmt.Errorf("expected hyphen in port range") + } + + var err error + var low int + var high int + low, err = strconv.Atoi(value[:hyphenIndex]) + if err == nil { + high, err = strconv.Atoi(value[hyphenIndex+1:]) + } + if err != nil { + return fmt.Errorf("unable to parse port range: %s: %v", value, err) + } + + if low > 65535 || high > 65535 { + return fmt.Errorf("the port range cannot be greater than 65535: %s", value) + } + + if high < low { + return fmt.Errorf("end port cannot be less than start port: %s", value) + } + + pr.Base = low + pr.Size = 1 + high - low + return nil +} + +// Type returns a descriptive string about this type. This is part of the +// pflag.Value interface. +func (*PortRange) Type() string { + return "portRange" +} + +// ParsePortRange parses a string of the form "min-max", inclusive at both +// ends, and initializs a new PortRange from it. +func ParsePortRange(value string) (*PortRange, error) { + pr := &PortRange{} + err := pr.Set(value) + if err != nil { + return nil, err + } + return pr, nil +} + +func ParsePortRangeOrDie(value string) *PortRange { + pr, err := ParsePortRange(value) + if err != nil { + panic(fmt.Sprintf("couldn't parse port range %q: %v", value, err)) + } + return pr +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go new file mode 100644 index 000000000..c0fd4e20f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go @@ -0,0 +1,77 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +var validSchemes = sets.NewString("http", "https", "") + +// SplitSchemeNamePort takes a string of the following forms: +// * "", returns "", "","", true +// * ":", returns "", "","",true +// * "::", returns "","","",true +// +// Name must be non-empty or valid will be returned false. +// Scheme must be "http" or "https" if specified +// Port is returned as a string, and it is not required to be numeric (could be +// used for a named port, for example). 
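A sketch of PortRange parsing and membership checks.

package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	pr, err := utilnet.ParsePortRange("30000-32767")
	if err != nil {
		panic(err)
	}
	fmt.Println(pr.Contains(31000), pr.Contains(8080)) // true false
	fmt.Println(pr.String())                           // 30000-32767
}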
+func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) { + parts := strings.Split(id, ":") + switch len(parts) { + case 1: + name = parts[0] + case 2: + name = parts[0] + port = parts[1] + case 3: + scheme = parts[0] + name = parts[1] + port = parts[2] + default: + return "", "", "", false + } + + if len(name) > 0 && validSchemes.Has(scheme) { + return scheme, name, port, true + } else { + return "", "", "", false + } +} + +// JoinSchemeNamePort returns a string that specifies the scheme, name, and port: +// * "" +// * ":" +// * "::" +// None of the parameters may contain a ':' character +// Name is required +// Scheme must be "", "http", or "https" +func JoinSchemeNamePort(scheme, name, port string) string { + if len(scheme) > 0 { + // Must include three segments to specify scheme + return scheme + ":" + name + ":" + port + } + if len(port) > 0 { + // Must include two segments to specify port + return name + ":" + port + } + // Return name alone + return name +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go new file mode 100644 index 000000000..461144f0b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -0,0 +1,46 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "net" + "reflect" + "syscall" +) + +// IPNetEqual checks if the two input IPNets are representing the same subnet. +// For example, +// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet. +// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet. +func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { + if ipnet1 == nil || ipnet2 == nil { + return false + } + if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) { + return true + } + return false +} + +// Returns if the given err is "connection reset by peer" error. +func IsConnectionReset(err error) bool { + opErr, ok := err.(*net.OpError) + if ok && opErr.Err.Error() == syscall.ECONNRESET.Error() { + return true + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go new file mode 100644 index 000000000..db109c2cd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go @@ -0,0 +1,85 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rand provides utilities related to randomization. 
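A sketch of the scheme:name:port helpers and of IPNetEqual from util.go above.

package main

import (
	"fmt"
	"net"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	scheme, name, port, valid := utilnet.SplitSchemeNamePort("https:etcd:2379")
	fmt.Println(scheme, name, port, valid)                      // https etcd 2379 true
	fmt.Println(utilnet.JoinSchemeNamePort(scheme, name, port)) // https:etcd:2379

	// IPNetEqual compares masks and mutual membership, so different textual
	// spellings of the same subnet still compare equal.
	_, a, _ := net.ParseCIDR("10.0.0.1/24")
	_, b, _ := net.ParseCIDR("10.0.0.0/24")
	fmt.Println(utilnet.IPNetEqual(a, b)) // true
}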
+package rand + +import ( + "math/rand" + "sync" + "time" +) + +var rng = struct { + sync.Mutex + rand *rand.Rand +}{ + rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())), +} + +// Intn generates an integer in range [0,max). +// By design this should panic if input is invalid, <= 0. +func Intn(max int) int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Intn(max) +} + +// IntnRange generates an integer in range [min,max). +// By design this should panic if input is invalid, <= 0. +func IntnRange(min, max int) int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Intn(max-min) + min +} + +// IntnRange generates an int64 integer in range [min,max). +// By design this should panic if input is invalid, <= 0. +func Int63nRange(min, max int64) int64 { + rng.Lock() + defer rng.Unlock() + return rng.rand.Int63n(max-min) + min +} + +// Seed seeds the rng with the provided seed. +func Seed(seed int64) { + rng.Lock() + defer rng.Unlock() + + rng.rand = rand.New(rand.NewSource(seed)) +} + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) +// from the default Source. +func Perm(n int) []int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Perm(n) +} + +// We omit vowels from the set of available characters to reduce the chances +// of "bad words" being formed. +var alphanums = []rune("bcdfghjklmnpqrstvwxz0123456789") + +// String generates a random alphanumeric string, without vowels, which is n +// characters long. This will panic if n is less than zero. +func String(length int) string { + b := make([]rune, length) + for i := range b { + b[i] = alphanums[Intn(len(alphanums))] + } + return string(b) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go new file mode 100644 index 000000000..acfeb827c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + DefaultStreamCreationTimeout = 30 * time.Second + + // The SPDY subprotocol "channel.k8s.io" is used for remote command + // attachment/execution. This represents the initial unversioned subprotocol, + // which has the known bugs http://issues.k8s.io/13394 and + // http://issues.k8s.io/13395. + StreamProtocolV1Name = "channel.k8s.io" + + // The SPDY subprotocol "v2.channel.k8s.io" is used for remote command + // attachment/execution. It is the second version of the subprotocol and + // resolves the issues present in the first version. + StreamProtocolV2Name = "v2.channel.k8s.io" + + // The SPDY subprotocol "v3.channel.k8s.io" is used for remote command + // attachment/execution. It is the third version of the subprotocol and + // adds support for resizing container terminals. 
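A sketch of the rand helpers above; the "pod-" prefix is illustrative.

package main

import (
	"fmt"

	utilrand "k8s.io/apimachinery/pkg/util/rand"
)

func main() {
	// A short consonant/digit suffix, similar to generated object name suffixes.
	fmt.Println("pod-" + utilrand.String(5))

	// Intn and IntnRange panic on invalid (empty) ranges, like math/rand.
	fmt.Println(utilrand.Intn(10), utilrand.IntnRange(80, 90))
}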
+ StreamProtocolV3Name = "v3.channel.k8s.io" + + // The SPDY subprotocol "v4.channel.k8s.io" is used for remote command + // attachment/execution. It is the 4th version of the subprotocol and + // adds support for exit codes. + StreamProtocolV4Name = "v4.channel.k8s.io" + + NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode") + ExitCodeCauseType = metav1.CauseType("ExitCode") +) + +var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go new file mode 100644 index 000000000..748174e19 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -0,0 +1,161 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "runtime" + "sync" + "time" + + "github.com/golang/glog" +) + +var ( + // ReallyCrash controls the behavior of HandleCrash and now defaults + // true. It's still exposed so components can optionally set to false + // to restore prior behavior. + ReallyCrash = true +) + +// PanicHandlers is a list of functions which will be invoked when a panic happens. +var PanicHandlers = []func(interface{}){logPanic} + +// HandleCrash simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. +// +// TODO: remove this function. We are switching to a world where it's safe for +// apiserver to panic, since it will be restarted by kubelet. At the beginning +// of the Kubernetes project, nothing was going to restart apiserver and so +// catching panics was important. But it's actually much simpler for montoring +// software if we just exit when an unexpected panic happens. +func HandleCrash(additionalHandlers ...func(interface{})) { + if r := recover(); r != nil { + for _, fn := range PanicHandlers { + fn(r) + } + for _, fn := range additionalHandlers { + fn(r) + } + if ReallyCrash { + // Actually proceed to panic. + panic(r) + } + } +} + +// logPanic logs the caller tree when a panic occurs. +func logPanic(r interface{}) { + callers := getCallers(r) + glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) +} + +func getCallers(r interface{}) string { + callers := "" + for i := 0; true; i++ { + _, file, line, ok := runtime.Caller(i) + if !ok { + break + } + callers = callers + fmt.Sprintf("%v:%v\n", file, line) + } + + return callers +} + +// ErrorHandlers is a list of functions which will be invoked when an unreturnable +// error occurs. +// TODO(lavalamp): for testability, this and the below HandleError function +// should be packaged up into a testable and reusable object. 
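A sketch of the intended call pattern for HandleCrash and HandleError; doWork and worker are illustrative.

package main

import (
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func doWork() error {
	return fmt.Errorf("transient failure")
}

func worker() {
	// HandleCrash logs a panic via PanicHandlers and, with ReallyCrash set,
	// re-panics; it is meant as the first deferred call in long-running goroutines.
	defer utilruntime.HandleCrash()

	if err := doWork(); err != nil {
		// HandleError logs and rate-limits errors that cannot be returned to a caller.
		utilruntime.HandleError(err)
	}
}

func main() {
	worker()
}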
+var ErrorHandlers = []func(error){ + logError, + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError, +} + +// HandlerError is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. +func HandleError(err error) { + // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead + if err == nil { + return + } + + for _, fn := range ErrorHandlers { + fn(err) + } +} + +// logError prints an error with the call stack of the location it was reported +func logError(err error) { + glog.ErrorDepth(2, err) +} + +type rudimentaryErrorBackoff struct { + minPeriod time.Duration // immutable + // TODO(lavalamp): use the clock for testability. Need to move that + // package for that to be accessible here. + lastErrorTimeLock sync.Mutex + lastErrorTime time.Time +} + +// OnError will block if it is called more often than the embedded period time. +// This will prevent overly tight hot error loops. +func (r *rudimentaryErrorBackoff) OnError(error) { + r.lastErrorTimeLock.Lock() + defer r.lastErrorTimeLock.Unlock() + d := time.Since(r.lastErrorTime) + if d < r.minPeriod { + time.Sleep(r.minPeriod - d) + } + r.lastErrorTime = time.Now() +} + +// GetCaller returns the caller of the function that calls it. +func GetCaller() string { + var pc [1]uintptr + runtime.Callers(3, pc[:]) + f := runtime.FuncForPC(pc[0]) + if f == nil { + return fmt.Sprintf("Unable to find caller") + } + return f.Name() +} + +// RecoverFromPanic replaces the specified error with an error containing the +// original error, and the call tree when a panic occurs. This enables error +// handlers to handle errors and panics the same way. +func RecoverFromPanic(err *error) { + if r := recover(); r != nil { + callers := getCallers(r) + + *err = fmt.Errorf( + "recovered from panic %q. (err=%v) Call stack:\n%v", + r, + *err, + callers) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go new file mode 100644 index 000000000..a460e4b1f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -0,0 +1,203 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by set-gen. Do not edit it manually! + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +type Byte map[byte]Empty + +// New creates a Byte from a list of values. +func NewByte(items ...byte) Byte { + ss := Byte{} + ss.Insert(items...) 
+ return ss +} + +// ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func ByteKeySet(theMap interface{}) Byte { + v := reflect.ValueOf(theMap) + ret := Byte{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(byte)) + } + return ret +} + +// Insert adds items to the set. +func (s Byte) Insert(items ...byte) { + for _, item := range items { + s[item] = Empty{} + } +} + +// Delete removes all items from the set. +func (s Byte) Delete(items ...byte) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s Byte) Has(item byte) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Byte) HasAll(items ...byte) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Byte) HasAny(items ...byte) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Byte) Difference(s2 Byte) Byte { + result := NewByte() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Byte) Union(s2 Byte) Byte { + result := NewByte() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Byte) Intersection(s2 Byte) Byte { + var walk, other Byte + result := NewByte() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Byte) IsSuperset(s2 Byte) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Byte) Equal(s2 Byte) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfByte []byte + +func (s sortableSliceOfByte) Len() int { return len(s) } +func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) } +func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted byte slice. +func (s Byte) List() []byte { + res := make(sortableSliceOfByte, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []byte(res) +} + +// UnsortedList returns the slice with contents in random order. 
+func (s Byte) UnsortedList() []byte { + res := make([]byte, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Byte) PopAny() (byte, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue byte + return zeroValue, false +} + +// Len returns the size of the set. +func (s Byte) Len() int { + return len(s) +} + +func lessByte(lhs, rhs byte) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go new file mode 100644 index 000000000..28a6a7d5c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by set-gen. Do not edit it manually! + +// Package sets has auto-generated set types. +package sets diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go new file mode 100644 index 000000000..cd22b953a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by set-gen. Do not edit it manually! + +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. +type Empty struct{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go new file mode 100644 index 000000000..0614e9fb0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -0,0 +1,203 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by set-gen. Do not edit it manually! 
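
(Editorial aside, not part of the vendored patch.) The generated set types all expose the same API; a small sketch using sets.Byte from the file above, with the results noted in comments:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	a := sets.NewByte('a', 'b', 'c')
	b := sets.NewByte('b', 'c', 'd')

	fmt.Println(a.Has('a'))               // true
	fmt.Println(a.Intersection(b).List()) // [98 99], i.e. 'b' and 'c', sorted
	fmt.Println(a.Difference(b).List())   // [97], i.e. 'a'
	fmt.Println(a.Union(b).Len())         // 4
}
```

The Int, Int64, and String variants that follow behave identically, only the element type changes.
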
+ +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +type Int map[int]Empty + +// New creates a Int from a list of values. +func NewInt(items ...int) Int { + ss := Int{} + ss.Insert(items...) + return ss +} + +// IntKeySet creates a Int from a keys of a map[int](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func IntKeySet(theMap interface{}) Int { + v := reflect.ValueOf(theMap) + ret := Int{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int)) + } + return ret +} + +// Insert adds items to the set. +func (s Int) Insert(items ...int) { + for _, item := range items { + s[item] = Empty{} + } +} + +// Delete removes all items from the set. +func (s Int) Delete(items ...int) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s Int) Has(item int) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int) HasAll(items ...int) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int) HasAny(items ...int) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int) Difference(s2 Int) Int { + result := NewInt() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int) Union(s2 Int) Int { + result := NewInt() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int) Intersection(s2 Int) Int { + var walk, other Int + result := NewInt() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int) IsSuperset(s2 Int) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int) Equal(s2 Int) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt []int + +func (s sortableSliceOfInt) Len() int { return len(s) } +func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) } +func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int slice. 
+func (s Int) List() []int { + res := make(sortableSliceOfInt, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int) UnsortedList() []int { + res := make([]int, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int) PopAny() (int, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int) Len() int { + return len(s) +} + +func lessInt(lhs, rhs int) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go new file mode 100644 index 000000000..82e1ba782 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -0,0 +1,203 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by set-gen. Do not edit it manually! + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +type Int64 map[int64]Empty + +// New creates a Int64 from a list of values. +func NewInt64(items ...int64) Int64 { + ss := Int64{} + ss.Insert(items...) + return ss +} + +// Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int64KeySet(theMap interface{}) Int64 { + v := reflect.ValueOf(theMap) + ret := Int64{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int64)) + } + return ret +} + +// Insert adds items to the set. +func (s Int64) Insert(items ...int64) { + for _, item := range items { + s[item] = Empty{} + } +} + +// Delete removes all items from the set. +func (s Int64) Delete(items ...int64) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s Int64) Has(item int64) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int64) HasAll(items ...int64) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int64) HasAny(items ...int64) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int64) Difference(s2 Int64) Int64 { + result := NewInt64() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int64) Union(s2 Int64) Int64 { + result := NewInt64() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int64) Intersection(s2 Int64) Int64 { + var walk, other Int64 + result := NewInt64() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int64) IsSuperset(s2 Int64) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int64) Equal(s2 Int64) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt64 []int64 + +func (s sortableSliceOfInt64) Len() int { return len(s) } +func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) } +func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int64 slice. +func (s Int64) List() []int64 { + res := make(sortableSliceOfInt64, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int64(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int64) UnsortedList() []int64 { + res := make([]int64, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int64) PopAny() (int64, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int64 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int64) Len() int { + return len(s) +} + +func lessInt64(lhs, rhs int64) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go new file mode 100644 index 000000000..baef7a6a2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -0,0 +1,203 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by set-gen. Do not edit it manually! + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// New creates a String from a list of values. +func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) 
+ return ss +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. +func (s String) Insert(items ...string) { + for _, item := range items { + s[item] = Empty{} + } +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. 
+func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go new file mode 100644 index 000000000..43c779a11 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -0,0 +1,254 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "fmt" + "reflect" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Error is an implementation of the 'error' interface, which represents a +// field-level validation error. +type Error struct { + Type ErrorType + Field string + BadValue interface{} + Detail string +} + +var _ error = &Error{} + +// Error implements the error interface. +func (v *Error) Error() string { + return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) +} + +// ErrorBody returns the error message without the field name. This is useful +// for building nice-looking higher-level error reporting. +func (v *Error) ErrorBody() string { + var s string + switch v.Type { + case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: + s = fmt.Sprintf("%s", v.Type) + default: + value := v.BadValue + valueType := reflect.TypeOf(value) + if value == nil || valueType == nil { + value = "null" + } else if valueType.Kind() == reflect.Ptr { + if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { + value = "null" + } else { + value = reflectValue.Elem().Interface() + } + } + switch t := value.(type) { + case int64, int32, float64, float32, bool: + // use simple printer for simple types + s = fmt.Sprintf("%s: %v", v.Type, value) + case string: + s = fmt.Sprintf("%s: %q", v.Type, t) + case fmt.Stringer: + // anything that defines String() is better than raw struct + s = fmt.Sprintf("%s: %s", v.Type, t.String()) + default: + // fallback to raw struct + // TODO: internal types have panic guards against json.Marshalling to prevent + // accidental use of internal types in external serialized form. For now, use + // %#v, although it would be better to show a more expressive output in the future + s = fmt.Sprintf("%s: %#v", v.Type, value) + } + } + if len(v.Detail) != 0 { + s += fmt.Sprintf(": %s", v.Detail) + } + return s +} + +// ErrorType is a machine readable value providing more detail about why +// a field is invalid. These values are expected to match 1-1 with +// CauseType in api/types.go. 
+type ErrorType string + +// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. +const ( + // ErrorTypeNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). See NotFound(). + ErrorTypeNotFound ErrorType = "FieldValueNotFound" + // ErrorTypeRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). See + // Required(). + ErrorTypeRequired ErrorType = "FieldValueRequired" + // ErrorTypeDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). See Duplicate(). + ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" + // ErrorTypeInvalid is used to report malformed values (e.g. failed regex + // match, too long, out of bounds). See Invalid(). + ErrorTypeInvalid ErrorType = "FieldValueInvalid" + // ErrorTypeNotSupported is used to report unknown values for enumerated + // fields (e.g. a list of valid values). See NotSupported(). + ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" + // ErrorTypeForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). See + // Forbidden(). + ErrorTypeForbidden ErrorType = "FieldValueForbidden" + // ErrorTypeTooLong is used to report that the given value is too long. + // This is similar to ErrorTypeInvalid, but the error will not include the + // too-long value. See TooLong(). + ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). + ErrorTypeInternal ErrorType = "InternalError" +) + +// String converts a ErrorType into its corresponding canonical error message. +func (t ErrorType) String() string { + switch t { + case ErrorTypeNotFound: + return "Not found" + case ErrorTypeRequired: + return "Required value" + case ErrorTypeDuplicate: + return "Duplicate value" + case ErrorTypeInvalid: + return "Invalid value" + case ErrorTypeNotSupported: + return "Unsupported value" + case ErrorTypeForbidden: + return "Forbidden" + case ErrorTypeTooLong: + return "Too long" + case ErrorTypeInternal: + return "Internal error" + default: + panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) + } +} + +// NotFound returns a *Error indicating "value not found". This is +// used to report failure to find a requested value (e.g. looking up an ID). +func NotFound(field *Path, value interface{}) *Error { + return &Error{ErrorTypeNotFound, field.String(), value, ""} +} + +// Required returns a *Error indicating "value required". This is used +// to report required values that are not provided (e.g. empty strings, null +// values, or empty arrays). +func Required(field *Path, detail string) *Error { + return &Error{ErrorTypeRequired, field.String(), "", detail} +} + +// Duplicate returns a *Error indicating "duplicate value". This is +// used to report collisions of values that must be unique (e.g. names or IDs). +func Duplicate(field *Path, value interface{}) *Error { + return &Error{ErrorTypeDuplicate, field.String(), value, ""} +} + +// Invalid returns a *Error indicating "invalid value". This is used +// to report malformed values (e.g. failed regex match, too long, out of bounds). 
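
(Editorial aside, not part of the vendored patch.) A sketch of how these constructors combine with the Path type from path.go, further below, to build structured validation errors and aggregate them; the field names used here are purely illustrative.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	specPath := field.NewPath("spec")

	var errs field.ErrorList
	// spec.replicas: Invalid value: -1: must be non-negative
	errs = append(errs, field.Invalid(specPath.Child("replicas"), -1, "must be non-negative"))
	// spec.containers[0].name: Required value
	errs = append(errs, field.Required(specPath.Child("containers").Index(0).Child("name"), ""))

	// ToAggregate deduplicates messages and returns a single error value.
	fmt.Println(errs.ToAggregate())
}
```
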
+func Invalid(field *Path, value interface{}, detail string) *Error { + return &Error{ErrorTypeInvalid, field.String(), value, detail} +} + +// NotSupported returns a *Error indicating "unsupported value". +// This is used to report unknown values for enumerated fields (e.g. a list of +// valid values). +func NotSupported(field *Path, value interface{}, validValues []string) *Error { + detail := "" + if validValues != nil && len(validValues) > 0 { + detail = "supported values: " + strings.Join(validValues, ", ") + } + return &Error{ErrorTypeNotSupported, field.String(), value, detail} +} + +// Forbidden returns a *Error indicating "forbidden". This is used to +// report valid (as per formatting rules) values which would be accepted under +// some conditions, but which are not permitted by current conditions (e.g. +// security policy). +func Forbidden(field *Path, detail string) *Error { + return &Error{ErrorTypeForbidden, field.String(), "", detail} +} + +// TooLong returns a *Error indicating "too long". This is used to +// report that the given value is too long. This is similar to +// Invalid, but the returned error will not include the too-long +// value. +func TooLong(field *Path, value interface{}, maxLength int) *Error { + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} +} + +// InternalError returns a *Error indicating "internal error". This is used +// to signal that an error was found that was not directly related to user +// input. The err argument must be non-nil. +func InternalError(field *Path, err error) *Error { + return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} +} + +// ErrorList holds a set of Errors. It is plausible that we might one day have +// non-field errors in this same umbrella package, but for now we don't, so +// we can keep it simple and leave ErrorList here. +type ErrorList []*Error + +// NewErrorTypeMatcher returns an errors.Matcher that returns true +// if the provided error is a Error and has the provided ErrorType. +func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { + return func(err error) bool { + if e, ok := err.(*Error); ok { + return e.Type == t + } + return false + } +} + +// ToAggregate converts the ErrorList into an errors.Aggregate. +func (list ErrorList) ToAggregate() utilerrors.Aggregate { + errs := make([]error, 0, len(list)) + errorMsgs := sets.NewString() + for _, err := range list { + msg := fmt.Sprintf("%v", err) + if errorMsgs.Has(msg) { + continue + } + errorMsgs.Insert(msg) + errs = append(errs, err) + } + return utilerrors.NewAggregate(errs) +} + +func fromAggregate(agg utilerrors.Aggregate) ErrorList { + errs := agg.Errors() + list := make(ErrorList, len(errs)) + for i := range errs { + list[i] = errs[i].(*Error) + } + return list +} + +// Filter removes items from the ErrorList that match the provided fns. +func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { + err := utilerrors.FilterOut(list.ToAggregate(), fns...) + if err == nil { + return nil + } + // FilterOut takes an Aggregate and returns an Aggregate + return fromAggregate(err.(utilerrors.Aggregate)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go new file mode 100644 index 000000000..2efc8eec7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go @@ -0,0 +1,91 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "bytes" + "fmt" + "strconv" +) + +// Path represents the path from some root to a particular field. +type Path struct { + name string // the name of this field or "" if this is an index + index string // if name == "", this is a subscript (index or map key) of the previous element + parent *Path // nil if this is the root element +} + +// NewPath creates a root Path object. +func NewPath(name string, moreNames ...string) *Path { + r := &Path{name: name, parent: nil} + for _, anotherName := range moreNames { + r = &Path{name: anotherName, parent: r} + } + return r +} + +// Root returns the root element of this Path. +func (p *Path) Root() *Path { + for ; p.parent != nil; p = p.parent { + // Do nothing. + } + return p +} + +// Child creates a new Path that is a child of the method receiver. +func (p *Path) Child(name string, moreNames ...string) *Path { + r := NewPath(name, moreNames...) + r.Root().parent = p + return r +} + +// Index indicates that the previous Path is to be subscripted by an int. +// This sets the same underlying value as Key. +func (p *Path) Index(index int) *Path { + return &Path{index: strconv.Itoa(index), parent: p} +} + +// Key indicates that the previous Path is to be subscripted by a string. +// This sets the same underlying value as Index. +func (p *Path) Key(key string) *Path { + return &Path{index: key, parent: p} +} + +// String produces a string representation of the Path. +func (p *Path) String() string { + // make a slice to iterate + elems := []*Path{} + for ; p != nil; p = p.parent { + elems = append(elems, p) + } + + // iterate, but it has to be backwards + buf := bytes.NewBuffer(nil) + for i := range elems { + p := elems[len(elems)-1-i] + if p.parent != nil && len(p.name) > 0 { + // This is either the root or it is a subscript. + buf.WriteString(".") + } + if len(p.name) > 0 { + buf.WriteString(p.name) + } else { + fmt.Fprintf(buf, "[%s]", p.index) + } + } + return buf.String() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go new file mode 100644 index 000000000..1e5b85047 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -0,0 +1,345 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "fmt" + "math" + "net" + "regexp" + "strings" + + "k8s.io/apimachinery/pkg/types" +) + +const qnameCharFmt string = "[A-Za-z0-9]" +const qnameExtCharFmt string = "[-A-Za-z0-9_.]" +const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt +const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const qualifiedNameMaxLength int = 63 + +var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") + +// IsQualifiedName tests whether the value passed is what Kubernetes calls a +// "qualified name". This is a format used in various places throughout the +// system. If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. +func IsQualifiedName(value string) []string { + var errs []string + parts := strings.Split(value, "/") + var name string + switch len(parts) { + case 1: + name = parts[0] + case 2: + var prefix string + prefix, name = parts[0], parts[1] + if len(prefix) == 0 { + errs = append(errs, "prefix part "+EmptyError()) + } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { + errs = append(errs, prefixEach(msgs, "prefix part ")...) + } + default: + return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ + " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')") + } + + if len(name) == 0 { + errs = append(errs, "name part "+EmptyError()) + } else if len(name) > qualifiedNameMaxLength { + errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) + } + if !qualifiedNameRegexp.MatchString(name) { + errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) + } + return errs +} + +const labelValueFmt string = "(" + qualifiedNameFmt + ")?" +const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const LabelValueMaxLength int = 63 + +var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") + +// IsValidLabelValue tests whether the value passed is a valid label value. If +// the value is not valid, a list of error strings is returned. Otherwise an +// empty list (or nil) is returned. +func IsValidLabelValue(value string) []string { + var errs []string + if len(value) > LabelValueMaxLength { + errs = append(errs, MaxLenError(LabelValueMaxLength)) + } + if !labelValueRegexp.MatchString(value) { + errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) + } + return errs +} + +const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const DNS1123LabelMaxLength int = 63 + +var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") + +// IsDNS1123Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1123). 
+func IsDNS1123Label(value string) []string { + var errs []string + if len(value) > DNS1123LabelMaxLength { + errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) + } + if !dns1123LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } + return errs +} + +const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" +const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" +const DNS1123SubdomainMaxLength int = 253 + +var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") + +// IsDNS1123Subdomain tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123). +func IsDNS1123Subdomain(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !dns1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) + } + return errs +} + +const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" +const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const DNS1035LabelMaxLength int = 63 + +var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") + +// IsDNS1035Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1035). +func IsDNS1035Label(value string) []string { + var errs []string + if len(value) > DNS1035LabelMaxLength { + errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) + } + if !dns1035LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) + } + return errs +} + +// wildcard definition - RFC 1034 section 4.3.3. +// examples: +// - valid: *.bar.com, *.foo.bar.com +// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * +const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt +const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" + +// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a +// wildcard subdomain in DNS (RFC 1034 section 4.3.3). +func IsWildcardDNS1123Subdomain(value string) []string { + wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") + + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !wildcardDNS1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) + } + return errs +} + +const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" +const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" + +var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$") + +// IsCIdentifier tests for a string that conforms the definition of an identifier +// in C. This checks the format, but not the length. 
+func IsCIdentifier(value string) []string { + if !cIdentifierRegexp.MatchString(value) { + return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")} + } + return nil +} + +// IsValidPortNum tests that the argument is a valid, non-zero port number. +func IsValidPortNum(port int) []string { + if 1 <= port && port <= 65535 { + return nil + } + return []string{InclusiveRangeError(1, 65535)} +} + +// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 +// TODO: once we have a type for UID/GID we should make these that type. +const ( + minUserID = 0 + maxUserID = math.MaxInt32 + minGroupID = 0 + maxGroupID = math.MaxInt32 +) + +// IsValidGroupID tests that the argument is a valid Unix GID. +func IsValidGroupID(gid types.UnixGroupID) []string { + if minGroupID <= gid && gid <= maxGroupID { + return nil + } + return []string{InclusiveRangeError(minGroupID, maxGroupID)} +} + +// IsValidUserID tests that the argument is a valid Unix UID. +func IsValidUserID(uid types.UnixUserID) []string { + if minUserID <= uid && uid <= maxUserID { + return nil + } + return []string{InclusiveRangeError(minUserID, maxUserID)} +} + +var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$") +var portNameOneLetterRegexp = regexp.MustCompile("[a-z]") + +// IsValidPortName check that the argument is valid syntax. It must be +// non-empty and no more than 15 characters long. It may contain only [-a-z0-9] +// and must contain at least one letter [a-z]. It must not start or end with a +// hyphen, nor contain adjacent hyphens. +// +// Note: We only allow lower-case characters, even though RFC 6335 is case +// insensitive. +func IsValidPortName(port string) []string { + var errs []string + if len(port) > 15 { + errs = append(errs, MaxLenError(15)) + } + if !portNameCharsetRegex.MatchString(port) { + errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") + } + if !portNameOneLetterRegexp.MatchString(port) { + errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") + } + if strings.Contains(port, "--") { + errs = append(errs, "must not contain consecutive hyphens") + } + if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') { + errs = append(errs, "must not begin or end with a hyphen") + } + return errs +} + +// IsValidIP tests that the argument is a valid IP address. +func IsValidIP(value string) []string { + if net.ParseIP(value) == nil { + return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} + } + return nil +} + +const percentFmt string = "[0-9]+%" +const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" + +var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") + +func IsValidPercent(percent string) []string { + if !percentRegexp.MatchString(percent) { + return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} + } + return nil +} + +const httpHeaderNameFmt string = "[-A-Za-z0-9]+" +const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'" + +var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$") + +// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's +// definition of a valid header field name (a stricter subset than RFC7230). 
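
(Editorial aside, not part of the vendored patch.) Each validator in this file returns a nil or empty slice when the value is valid and a list of human-readable messages otherwise, so callers can simply append the results into a larger error list. A quick sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	fmt.Println(validation.IsDNS1123Label("my-pod"))               // [] (valid)
	fmt.Println(validation.IsDNS1123Label("My_Pod"))               // regex error message
	fmt.Println(validation.IsValidPortNum(8080))                   // [] (valid)
	fmt.Println(validation.IsValidPortNum(0))                      // must be between 1 and 65535, inclusive
	fmt.Println(validation.IsValidIP("10.9.8.7"))                  // [] (valid)
	fmt.Println(validation.IsQualifiedName("example.com/MyName"))  // [] (valid)
}
```
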
+func IsHTTPHeaderName(value string) []string { + if !httpHeaderNameRegexp.MatchString(value) { + return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")} + } + return nil +} + +const configMapKeyFmt = `[-._a-zA-Z0-9]+` +const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" + +var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$") + +// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret +func IsConfigMapKey(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !configMapKeyRegexp.MatchString(value) { + errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) + } + if value == "." { + errs = append(errs, `must not be '.'`) + } else if value == ".." { + errs = append(errs, `must not be '..'`) + } else if strings.HasPrefix(value, "..") { + errs = append(errs, `must not start with '..'`) + } + return errs +} + +// MaxLenError returns a string explanation of a "string too long" validation +// failure. +func MaxLenError(length int) string { + return fmt.Sprintf("must be no more than %d characters", length) +} + +// RegexError returns a string explanation of a regex validation failure. +func RegexError(msg string, fmt string, examples ...string) string { + if len(examples) == 0 { + return msg + " (regex used for validation is '" + fmt + "')" + } + msg += " (e.g. " + for i := range examples { + if i > 0 { + msg += " or " + } + msg += "'" + examples[i] + "', " + } + msg += "regex used for validation is '" + fmt + "')" + return msg +} + +// EmptyError returns a string explanation of a "must not be empty" validation +// failure. +func EmptyError() string { + return "must be non-empty" +} + +func prefixEach(msgs []string, prefix string) []string { + for i := range msgs { + msgs[i] = prefix + msgs[i] + } + return msgs +} + +// InclusiveRangeError returns a string explanation of a numeric "must be +// between" validation failure. +func InclusiveRangeError(lo, hi int) string { + return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go new file mode 100644 index 000000000..3f0c968ec --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package wait provides tools for polling or listening for changes +// to a condition. +package wait // import "k8s.io/apimachinery/pkg/util/wait" diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go new file mode 100644 index 000000000..badaa2159 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -0,0 +1,349 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "errors" + "math/rand" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// For any test of the style: +// ... +// <- time.After(timeout): +// t.Errorf("Timed out") +// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s +// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine +// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test. +var ForeverTestTimeout = time.Second * 30 + +// NeverStop may be passed to Until to make it never stop. +var NeverStop <-chan struct{} = make(chan struct{}) + +// Forever calls f every period for ever. +// +// Forever is syntactic sugar on top of Until. +func Forever(f func(), period time.Duration) { + Until(f, period, NeverStop) +} + +// Until loops until stop channel is closed, running f every period. +// +// Until is syntactic sugar on top of JitterUntil with zero jitter factor and +// with sliding = true (which means the timer for period starts after the f +// completes). +func Until(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, true, stopCh) +} + +// NonSlidingUntil loops until stop channel is closed, running f every +// period. +// +// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter +// factor, with sliding = false (meaning the timer for period starts at the same +// time as the function starts). +func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, false, stopCh) +} + +// JitterUntil loops until stop channel is closed, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Close stopCh to stop. f may not be invoked if stop channel is already +// closed. Pass NeverStop to if you don't want it stop. +func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { + var t *time.Timer + var sawTimeout bool + + for { + select { + case <-stopCh: + return + default: + } + + jitteredPeriod := period + if jitterFactor > 0.0 { + jitteredPeriod = Jitter(period, jitterFactor) + } + + if !sliding { + t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout) + } + + func() { + defer runtime.HandleCrash() + f() + }() + + if sliding { + t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout) + } + + // NOTE: b/c there is no priority selection in golang + // it is possible for this to race, meaning we could + // trigger t.C and stopCh, and t.C select falls through. + // In order to mitigate we re-check stopCh at the beginning + // of every loop to prevent extra executions of f(). 
+ select { + case <-stopCh: + return + case <-t.C: + sawTimeout = true + } + } +} + +// Jitter returns a time.Duration between duration and duration + maxFactor * +// duration. +// +// This allows clients to avoid converging on periodic behavior. If maxFactor +// is 0.0, a suggested default value will be chosen. +func Jitter(duration time.Duration, maxFactor float64) time.Duration { + if maxFactor <= 0.0 { + maxFactor = 1.0 + } + wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) + return wait +} + +// ErrWaitTimeout is returned when the condition exited without success. +var ErrWaitTimeout = errors.New("timed out waiting for the condition") + +// ConditionFunc returns true if the condition is satisfied, or an error +// if the loop should be aborted. +type ConditionFunc func() (done bool, err error) + +// Backoff holds parameters applied to a Backoff function. +type Backoff struct { + Duration time.Duration // the base duration + Factor float64 // Duration is multiplied by factor each iteration + Jitter float64 // The amount of jitter applied each iteration + Steps int // Exit with error after this many steps +} + +// ExponentialBackoff repeats a condition check with exponential backoff. +// +// It checks the condition up to Steps times, increasing the wait by multiplying +// the previous duration by Factor. +// +// If Jitter is greater than zero, a random amount of each duration is added +// (between duration and duration*(1+jitter)). +// +// If the condition never returns true, ErrWaitTimeout is returned. All other +// errors terminate immediately. +func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { + duration := backoff.Duration + for i := 0; i < backoff.Steps; i++ { + if i != 0 { + adjusted := duration + if backoff.Jitter > 0.0 { + adjusted = Jitter(duration, backoff.Jitter) + } + time.Sleep(adjusted) + duration = time.Duration(float64(duration) * backoff.Factor) + } + if ok, err := condition(); err != nil || ok { + return err + } + } + return ErrWaitTimeout +} + +// Poll tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// Poll always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +func Poll(interval, timeout time.Duration, condition ConditionFunc) error { + return pollInternal(poller(interval, timeout), condition) +} + +func pollInternal(wait WaitFunc, condition ConditionFunc) error { + done := make(chan struct{}) + defer close(done) + return WaitFor(wait, condition, done) +} + +// PollImmediate tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// Poll always checks 'condition' before waiting for the interval. 'condition' +// will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. 
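
(Editorial aside, not part of the vendored patch.) The two entry points used most often are Until, for periodic work that stops when a channel is closed, and Poll, for bounded condition checks that give up with ErrWaitTimeout. A minimal sketch; the 300ms "becomes ready" delay is invented for illustration:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})

	// Run a function once per second until stopCh is closed.
	go wait.Until(func() { fmt.Println("tick") }, time.Second, stopCh)

	// Check a condition every 100ms, giving up after 2s with ErrWaitTimeout.
	start := time.Now()
	err := wait.Poll(100*time.Millisecond, 2*time.Second, func() (bool, error) {
		// Pretend the resource we wait for becomes ready ~300ms in.
		return time.Since(start) > 300*time.Millisecond, nil
	})
	fmt.Println("poll result:", err) // <nil> once the condition returns true

	close(stopCh)
}
```

Note that Poll waits one interval before the first check; PollImmediate, defined next, checks the condition once up front.
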
+func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { + return pollImmediateInternal(poller(interval, timeout), condition) +} + +func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error { + done, err := condition() + if err != nil { + return err + } + if done { + return nil + } + return pollInternal(wait, condition) +} + +// PollInfinite tries a condition func until it returns true or an error +// +// PollInfinite always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +func PollInfinite(interval time.Duration, condition ConditionFunc) error { + done := make(chan struct{}) + defer close(done) + return PollUntil(interval, condition, done) +} + +// PollImmediateInfinite tries a condition func until it returns true or an error +// +// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { + done, err := condition() + if err != nil { + return err + } + if done { + return nil + } + return PollInfinite(interval, condition) +} + +// PollUntil tries a condition func until it returns true, an error or stopCh is +// closed. +// +// PolUntil always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return WaitFor(poller(interval, 0), condition, stopCh) +} + +// WaitFunc creates a channel that receives an item every time a test +// should be executed and is closed when the last test should be invoked. +type WaitFunc func(done <-chan struct{}) <-chan struct{} + +// WaitFor continually checks 'fn' as driven by 'wait'. +// +// WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value +// placed on the channel and once more when the channel is closed. +// +// If 'fn' returns an error the loop ends and that error is returned, and if +// 'fn' returns true the loop ends and nil is returned. +// +// ErrWaitTimeout will be returned if the channel is closed without fn ever +// returning true. +func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { + c := wait(done) + for { + _, open := <-c + ok, err := fn() + if err != nil { + return err + } + if ok { + return nil + } + if !open { + break + } + } + return ErrWaitTimeout +} + +// poller returns a WaitFunc that will send to the channel every interval until +// timeout has elapsed and then closes the channel. +// +// Over very short intervals you may receive no ticks before the channel is +// closed. A timeout of 0 is interpreted as an infinity. +// +// Output ticks are not buffered. If the channel is not ready to receive an +// item, the tick is skipped. +func poller(interval, timeout time.Duration) WaitFunc { + return WaitFunc(func(done <-chan struct{}) <-chan struct{} { + ch := make(chan struct{}) + + go func() { + defer close(ch) + + tick := time.NewTicker(interval) + defer tick.Stop() + + var after <-chan time.Time + if timeout != 0 { + // time.After is more convenient, but it + // potentially leaves timers around much longer + // than necessary if we exit early. 
+ timer := time.NewTimer(timeout) + after = timer.C + defer timer.Stop() + } + + for { + select { + case <-tick.C: + // If the consumer isn't ready for this signal drop it and + // check the other channels. + select { + case ch <- struct{}{}: + default: + } + case <-after: + return + case <-done: + return + } + } + }() + + return ch + }) +} + +// resetOrReuseTimer avoids allocating a new timer if one is already in use. +// Not safe for multiple threads. +func resetOrReuseTimer(t *time.Timer, d time.Duration, sawTimeout bool) *time.Timer { + if t == nil { + return time.NewTimer(d) + } + if !t.Stop() && !sawTimeout { + <-t.C + } + t.Reset(d) + return t +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go new file mode 100644 index 000000000..6ebfaea70 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -0,0 +1,346 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "unicode" + + "github.com/ghodss/yaml" + "github.com/golang/glog" +) + +// ToJSON converts a single YAML document into a JSON document +// or returns an error. If the document appears to be JSON the +// YAML decoding path is not used (so that error messages are +// JSON specific). +func ToJSON(data []byte) ([]byte, error) { + if hasJSONPrefix(data) { + return data, nil + } + return yaml.YAMLToJSON(data) +} + +// YAMLToJSONDecoder decodes YAML documents from an io.Reader by +// separating individual documents. It first converts the YAML +// body to JSON, then unmarshals the JSON. +type YAMLToJSONDecoder struct { + reader Reader +} + +// NewYAMLToJSONDecoder decodes YAML documents from the provided +// stream in chunks by converting each document (as defined by +// the YAML spec) into its own chunk, converting it to JSON via +// yaml.YAMLToJSON, and then passing it to json.Decoder. +func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder { + reader := bufio.NewReader(r) + return &YAMLToJSONDecoder{ + reader: NewYAMLReader(reader), + } +} + +// Decode reads a YAML document as JSON from the stream or returns +// an error. The decoding rules match json.Unmarshal, not +// yaml.Unmarshal. +func (d *YAMLToJSONDecoder) Decode(into interface{}) error { + bytes, err := d.reader.Read() + if err != nil && err != io.EOF { + return err + } + + if len(bytes) != 0 { + err := yaml.Unmarshal(bytes, into) + if err != nil { + return YAMLSyntaxError{err} + } + } + return err +} + +// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if +// the data is not sufficient. +type YAMLDecoder struct { + r io.ReadCloser + scanner *bufio.Scanner + remaining []byte +} + +// NewDocumentDecoder decodes YAML documents from the provided +// stream in chunks by converting each document (as defined by +// the YAML spec) into its own chunk. 
io.ErrShortBuffer will be +// returned if the entire buffer could not be read to assist +// the caller in framing the chunk. +func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser { + scanner := bufio.NewScanner(r) + scanner.Split(splitYAMLDocument) + return &YAMLDecoder{ + r: r, + scanner: scanner, + } +} + +// Read reads the previous slice into the buffer, or attempts to read +// the next chunk. +// TODO: switch to readline approach. +func (d *YAMLDecoder) Read(data []byte) (n int, err error) { + left := len(d.remaining) + if left == 0 { + // return the next chunk from the stream + if !d.scanner.Scan() { + err := d.scanner.Err() + if err == nil { + err = io.EOF + } + return 0, err + } + out := d.scanner.Bytes() + d.remaining = out + left = len(out) + } + + // fits within data + if left <= len(data) { + copy(data, d.remaining) + d.remaining = nil + return len(d.remaining), nil + } + + // caller will need to reread + copy(data, d.remaining[:left]) + d.remaining = d.remaining[left:] + return len(data), io.ErrShortBuffer +} + +func (d *YAMLDecoder) Close() error { + return d.r.Close() +} + +const yamlSeparator = "\n---" +const separator = "---" + +// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. +func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + sep := len([]byte(yamlSeparator)) + if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { + // We have a potential document terminator + i += sep + after := data[i:] + if len(after) == 0 { + // we can't read any more characters + if atEOF { + return len(data), data[:len(data)-sep], nil + } + return 0, nil, nil + } + if j := bytes.IndexByte(after, '\n'); j >= 0 { + return i + j + 1, data[0 : i-sep], nil + } + return 0, nil, nil + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. + return 0, nil, nil +} + +// decoder is a convenience interface for Decode. +type decoder interface { + Decode(into interface{}) error +} + +// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or +// YAML documents by sniffing for a leading { character. +type YAMLOrJSONDecoder struct { + r io.Reader + bufferSize int + + decoder decoder + rawData []byte +} + +type JSONSyntaxError struct { + Line int + Err error +} + +func (e JSONSyntaxError) Error() string { + return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error()) +} + +type YAMLSyntaxError struct { + err error +} + +func (e YAMLSyntaxError) Error() string { + return e.err.Error() +} + +// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents +// or JSON documents from the given reader as a stream. bufferSize determines +// how far into the stream the decoder will look to figure out whether this +// is a JSON stream (has whitespace followed by an open brace). +func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder { + return &YAMLOrJSONDecoder{ + r: r, + bufferSize: bufferSize, + } +} + +// Decode unmarshals the next object from the underlying stream into the +// provide object, or returns an error. 
+func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { + if d.decoder == nil { + buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) + if isJSON { + glog.V(4).Infof("decoding stream as JSON") + d.decoder = json.NewDecoder(buffer) + d.rawData = origData + } else { + glog.V(4).Infof("decoding stream as YAML") + d.decoder = NewYAMLToJSONDecoder(buffer) + } + } + err := d.decoder.Decode(into) + if jsonDecoder, ok := d.decoder.(*json.Decoder); ok { + if syntax, ok := err.(*json.SyntaxError); ok { + data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) + if readErr != nil { + glog.V(4).Infof("reading stream failed: %v", readErr) + } + js := string(data) + + // if contents from io.Reader are not complete, + // use the original raw data to prevent panic + if int64(len(js)) <= syntax.Offset { + js = string(d.rawData) + } + + start := strings.LastIndex(js[:syntax.Offset], "\n") + 1 + line := strings.Count(js[:start], "\n") + return JSONSyntaxError{ + Line: line, + Err: fmt.Errorf(syntax.Error()), + } + } + } + return err +} + +type Reader interface { + Read() ([]byte, error) +} + +type YAMLReader struct { + reader Reader +} + +func NewYAMLReader(r *bufio.Reader) *YAMLReader { + return &YAMLReader{ + reader: &LineReader{reader: r}, + } +} + +// Read returns a full YAML document. +func (r *YAMLReader) Read() ([]byte, error) { + var buffer bytes.Buffer + for { + line, err := r.reader.Read() + if err != nil && err != io.EOF { + return nil, err + } + + sep := len([]byte(separator)) + if i := bytes.Index(line, []byte(separator)); i == 0 { + // We have a potential document terminator + i += sep + after := line[i:] + if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 { + if buffer.Len() != 0 { + return buffer.Bytes(), nil + } + if err == io.EOF { + return nil, err + } + } + } + if err == io.EOF { + if buffer.Len() != 0 { + // If we're at EOF, we have a final, non-terminated line. Return it. + return buffer.Bytes(), nil + } + return nil, err + } + buffer.Write(line) + } +} + +type LineReader struct { + reader *bufio.Reader +} + +// Read returns a single line (with '\n' ended) from the underlying reader. +// An error is returned iff there is an error with the underlying reader. +func (r *LineReader) Read() ([]byte, error) { + var ( + isPrefix bool = true + err error = nil + line []byte + buffer bytes.Buffer + ) + + for isPrefix && err == nil { + line, isPrefix, err = r.reader.ReadLine() + buffer.Write(line) + } + buffer.WriteByte('\n') + return buffer.Bytes(), err +} + +// GuessJSONStream scans the provided reader up to size, looking +// for an open brace indicating this is JSON. It will return the +// bufio.Reader it creates for the consumer. +func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) { + buffer := bufio.NewReaderSize(r, size) + b, _ := buffer.Peek(size) + return buffer, b, hasJSONPrefix(b) +} + +var jsonPrefix = []byte("{") + +// hasJSONPrefix returns true if the provided buffer appears to start with +// a JSON open brace. +func hasJSONPrefix(buf []byte) bool { + return hasPrefix(buf, jsonPrefix) +} + +// Return true if the first non-whitespace bytes in buf is +// prefix. 
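NewYAMLReader splits a multi-document stream on the --- separator; a sketch with two inline documents (the error handling below collapses io.EOF and real errors for brevity):

package main

import (
	"bufio"
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	stream := "a: 1\n---\nb: 2\n"
	r := yaml.NewYAMLReader(bufio.NewReader(strings.NewReader(stream)))

	// Read returns one document per call and io.EOF once the stream is exhausted.
	for {
		doc, err := r.Read()
		if err != nil {
			break
		}
		fmt.Printf("%q\n", string(doc))
	}
}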
+func hasPrefix(buf []byte, prefix []byte) bool { + trim := bytes.TrimLeftFunc(buf, unicode.IsSpace) + return bytes.HasPrefix(trim, prefix) +} diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go new file mode 100644 index 000000000..5e77af7ea --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version supplies the type for version information collected at build time. +// +k8s:openapi-gen=true +package version // import "k8s.io/apimachinery/pkg/version" diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go new file mode 100644 index 000000000..72727b503 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/types.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Info contains versioning information. +// TODO: Add []string of api versions supported? It's still unclear +// how we'll want to distribute that information. +type Info struct { + Major string `json:"major"` + Minor string `json:"minor"` + GitVersion string `json:"gitVersion"` + GitCommit string `json:"gitCommit"` + GitTreeState string `json:"gitTreeState"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` + Compiler string `json:"compiler"` + Platform string `json:"platform"` +} + +// String returns info as a human-friendly version string. +func (info Info) String() string { + return info.GitVersion +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go new file mode 100644 index 000000000..7e6bf3fb9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package watch contains a generic watchable interface, and a fake for +// testing code that uses the watch interface. +package watch // import "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/k8s.io/apimachinery/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go new file mode 100644 index 000000000..3ca27f22c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/filter.go @@ -0,0 +1,109 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "sync" +) + +// FilterFunc should take an event, possibly modify it in some way, and return +// the modified event. If the event should be ignored, then return keep=false. +type FilterFunc func(in Event) (out Event, keep bool) + +// Filter passes all events through f before allowing them to pass on. +// Putting a filter on a watch, as an unavoidable side-effect due to the way +// go channels work, effectively causes the watch's event channel to have its +// queue length increased by one. +// +// WARNING: filter has a fatal flaw, in that it can't properly update the +// Type field (Add/Modified/Deleted) to reflect items beginning to pass the +// filter when they previously didn't. +// +func Filter(w Interface, f FilterFunc) Interface { + fw := &filteredWatch{ + incoming: w, + result: make(chan Event), + f: f, + } + go fw.loop() + return fw +} + +type filteredWatch struct { + incoming Interface + result chan Event + f FilterFunc +} + +// ResultChan returns a channel which will receive filtered events. +func (fw *filteredWatch) ResultChan() <-chan Event { + return fw.result +} + +// Stop stops the upstream watch, which will eventually stop this watch. +func (fw *filteredWatch) Stop() { + fw.incoming.Stop() +} + +// loop waits for new values, filters them, and resends them. +func (fw *filteredWatch) loop() { + defer close(fw.result) + for { + event, ok := <-fw.incoming.ResultChan() + if !ok { + break + } + filtered, keep := fw.f(event) + if keep { + fw.result <- filtered + } + } +} + +// Recorder records all events that are sent from the watch until it is closed. +type Recorder struct { + Interface + + lock sync.Mutex + events []Event +} + +var _ Interface = &Recorder{} + +// NewRecorder wraps an Interface and records any changes sent across it. +func NewRecorder(w Interface) *Recorder { + r := &Recorder{} + r.Interface = Filter(w, r.record) + return r +} + +// record is a FilterFunc and tracks each received event. +func (r *Recorder) record(in Event) (Event, bool) { + r.lock.Lock() + defer r.lock.Unlock() + r.events = append(r.events, in) + return in, true +} + +// Events returns a copy of the events sent across this recorder. 
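Recorder is handy in tests for asserting on everything that flowed through a watch; a sketch using the FakeWatcher defined later in this package (nil event objects keep the sketch dependency-free):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	source := watch.NewFake()
	rec := watch.NewRecorder(source)

	go func() {
		source.Add(nil)
		source.Modify(nil)
		source.Stop()
	}()

	// Drain the pass-through stream, then inspect the recorded copies.
	for range rec.ResultChan() {
	}
	fmt.Println("recorded", len(rec.Events()), "events")
}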
+func (r *Recorder) Events() []Event { + r.lock.Lock() + defer r.lock.Unlock() + copied := make([]Event, len(r.events)) + copy(copied, r.events) + return copied +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go new file mode 100644 index 000000000..fafccd78e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -0,0 +1,257 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch +// channel is full. +type FullChannelBehavior int + +const ( + WaitIfChannelFull FullChannelBehavior = iota + DropIfChannelFull +) + +// Buffer the incoming queue a little bit even though it should rarely ever accumulate +// anything, just in case a few events are received in such a short window that +// Broadcaster can't move them onto the watchers' queues fast enough. +const incomingQueueLength = 25 + +// Broadcaster distributes event notifications among any number of watchers. Every event +// is delivered to every watcher. +type Broadcaster struct { + // TODO: see if this lock is needed now that new watchers go through + // the incoming channel. + lock sync.Mutex + + watchers map[int64]*broadcasterWatcher + nextWatcher int64 + distributing sync.WaitGroup + + incoming chan Event + + // How large to make watcher's channel. + watchQueueLength int + // If one of the watch channels is full, don't wait for it to become empty. + // Instead just deliver it to the watchers that do have space in their + // channels and move on to the next event. + // It's more fair to do this on a per-watcher basis than to do it on the + // "incoming" channel, which would allow one slow watcher to prevent all + // other watchers from getting new events. + fullChannelBehavior FullChannelBehavior +} + +// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher. +// It is guaranteed that events will be distributed in the order in which they occur, +// but the order in which a single event is distributed among all of the watchers is unspecified. +func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster { + m := &Broadcaster{ + watchers: map[int64]*broadcasterWatcher{}, + incoming: make(chan Event, incomingQueueLength), + watchQueueLength: queueLength, + fullChannelBehavior: fullChannelBehavior, + } + m.distributing.Add(1) + go m.loop() + return m +} + +const internalRunFunctionMarker = "internal-do-function" + +// a function type we can shoehorn into the queue. +type functionFakeRuntimeObject func() + +func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind { + return schema.EmptyObjectKind +} + +// Execute f, blocking the incoming queue (and waiting for it to drain first). 
+// The purpose of this terrible hack is so that watchers added after an event +// won't ever see that event, and will always see any event after they are +// added. +func (b *Broadcaster) blockQueue(f func()) { + var wg sync.WaitGroup + wg.Add(1) + b.incoming <- Event{ + Type: internalRunFunctionMarker, + Object: functionFakeRuntimeObject(func() { + defer wg.Done() + f() + }), + } + wg.Wait() +} + +// Watch adds a new watcher to the list and returns an Interface for it. +// Note: new watchers will only receive new events. They won't get an entire history +// of previous events. +func (m *Broadcaster) Watch() Interface { + var w *broadcasterWatcher + m.blockQueue(func() { + m.lock.Lock() + defer m.lock.Unlock() + id := m.nextWatcher + m.nextWatcher++ + w = &broadcasterWatcher{ + result: make(chan Event, m.watchQueueLength), + stopped: make(chan struct{}), + id: id, + m: m, + } + m.watchers[id] = w + }) + return w +} + +// WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends +// queuedEvents down the new watch before beginning to send ordinary events from Broadcaster. +// The returned watch will have a queue length that is at least large enough to accommodate +// all of the items in queuedEvents. +func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { + var w *broadcasterWatcher + m.blockQueue(func() { + m.lock.Lock() + defer m.lock.Unlock() + id := m.nextWatcher + m.nextWatcher++ + length := m.watchQueueLength + if n := len(queuedEvents) + 1; n > length { + length = n + } + w = &broadcasterWatcher{ + result: make(chan Event, length), + stopped: make(chan struct{}), + id: id, + m: m, + } + m.watchers[id] = w + for _, e := range queuedEvents { + w.result <- e + } + }) + return w +} + +// stopWatching stops the given watcher and removes it from the list. +func (m *Broadcaster) stopWatching(id int64) { + m.lock.Lock() + defer m.lock.Unlock() + w, ok := m.watchers[id] + if !ok { + // No need to do anything, it's already been removed from the list. + return + } + delete(m.watchers, id) + close(w.result) +} + +// closeAll disconnects all watchers (presumably in response to a Shutdown call). +func (m *Broadcaster) closeAll() { + m.lock.Lock() + defer m.lock.Unlock() + for _, w := range m.watchers { + close(w.result) + } + // Delete everything from the map, since presence/absence in the map is used + // by stopWatching to avoid double-closing the channel. + m.watchers = map[int64]*broadcasterWatcher{} +} + +// Action distributes the given event among all watchers. +func (m *Broadcaster) Action(action EventType, obj runtime.Object) { + m.incoming <- Event{action, obj} +} + +// Shutdown disconnects all watchers (but any queued events will still be distributed). +// You must not call Action or Watch* after calling Shutdown. This call blocks +// until all events have been distributed through the outbound channels. Note +// that since they can be buffered, this means that the watchers might not +// have received the data yet as it can remain sitting in the buffered +// channel. +func (m *Broadcaster) Shutdown() { + close(m.incoming) + m.distributing.Wait() +} + +// loop receives from m.incoming and distributes to all watchers. +func (m *Broadcaster) loop() { + // Deliberately not catching crashes here. Yes, bring down the process if there's a + // bug in watch.Broadcaster. 
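From the caller's side the Broadcaster is used roughly like this (a sketch; the queue length of 10 and the nil event object are arbitrary):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	b := watch.NewBroadcaster(10, watch.WaitIfChannelFull)
	w := b.Watch()

	b.Action(watch.Added, nil)
	b.Shutdown() // blocks until the event above has been handed to every watcher

	for ev := range w.ResultChan() {
		fmt.Println("watcher saw", ev.Type)
	}
}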
+ for { + event, ok := <-m.incoming + if !ok { + break + } + if event.Type == internalRunFunctionMarker { + event.Object.(functionFakeRuntimeObject)() + continue + } + m.distribute(event) + } + m.closeAll() + m.distributing.Done() +} + +// distribute sends event to all watchers. Blocking. +func (m *Broadcaster) distribute(event Event) { + m.lock.Lock() + defer m.lock.Unlock() + if m.fullChannelBehavior == DropIfChannelFull { + for _, w := range m.watchers { + select { + case w.result <- event: + case <-w.stopped: + default: // Don't block if the event can't be queued. + } + } + } else { + for _, w := range m.watchers { + select { + case w.result <- event: + case <-w.stopped: + } + } + } +} + +// broadcasterWatcher handles a single watcher of a broadcaster +type broadcasterWatcher struct { + result chan Event + stopped chan struct{} + stop sync.Once + id int64 + m *Broadcaster +} + +// ResultChan returns a channel to use for waiting on events. +func (mw *broadcasterWatcher) ResultChan() <-chan Event { + return mw.result +} + +// Stop stops watching and removes mw from its list. +func (mw *broadcasterWatcher) Stop() { + mw.stop.Do(func() { + close(mw.stopped) + mw.m.stopWatching(mw.id) + }) +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go new file mode 100644 index 000000000..93bb1cdf7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -0,0 +1,119 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "io" + "sync" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. +type Decoder interface { + // Decode should return the type of event, the decoded object, or an error. + // An error will cause StreamWatcher to call Close(). Decode should block until + // it has data or an error occurs. + Decode() (action EventType, object runtime.Object, err error) + + // Close should close the underlying io.Reader, signalling to the source of + // the stream that it is no longer being watched. Close() must cause any + // outstanding call to Decode() to return with an error of some sort. + Close() +} + +// StreamWatcher turns any stream for which you can write a Decoder interface +// into a watch.Interface. +type StreamWatcher struct { + sync.Mutex + source Decoder + result chan Event + stopped bool +} + +// NewStreamWatcher creates a StreamWatcher from the given decoder. +func NewStreamWatcher(d Decoder) *StreamWatcher { + sw := &StreamWatcher{ + source: d, + // It's easy for a consumer to add buffering via an extra + // goroutine/channel, but impossible for them to remove it, + // so nonbuffered is better. + result: make(chan Event), + } + go sw.receive() + return sw +} + +// ResultChan implements Interface. 
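StreamWatcher only needs something that satisfies Decoder; a toy decoder that replays a fixed slice of events and then reports io.EOF (treated as a clean end of stream) is enough to exercise it:

package main

import (
	"fmt"
	"io"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
)

// sliceDecoder replays canned events; it is purely illustrative.
type sliceDecoder struct {
	events []watch.Event
}

func (d *sliceDecoder) Decode() (watch.EventType, runtime.Object, error) {
	if len(d.events) == 0 {
		return "", nil, io.EOF
	}
	ev := d.events[0]
	d.events = d.events[1:]
	return ev.Type, ev.Object, nil
}

func (d *sliceDecoder) Close() {}

func main() {
	sw := watch.NewStreamWatcher(&sliceDecoder{events: []watch.Event{{Type: watch.Added}}})
	for ev := range sw.ResultChan() {
		fmt.Println("decoded", ev.Type)
	}
}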
+func (sw *StreamWatcher) ResultChan() <-chan Event { + return sw.result +} + +// Stop implements Interface. +func (sw *StreamWatcher) Stop() { + // Call Close() exactly once by locking and setting a flag. + sw.Lock() + defer sw.Unlock() + if !sw.stopped { + sw.stopped = true + sw.source.Close() + } +} + +// stopping returns true if Stop() was called previously. +func (sw *StreamWatcher) stopping() bool { + sw.Lock() + defer sw.Unlock() + return sw.stopped +} + +// receive reads result from the decoder in a loop and sends down the result channel. +func (sw *StreamWatcher) receive() { + defer close(sw.result) + defer sw.Stop() + defer utilruntime.HandleCrash() + for { + action, obj, err := sw.source.Decode() + if err != nil { + // Ignore expected error. + if sw.stopping() { + return + } + switch err { + case io.EOF: + // watch closed normally + case io.ErrUnexpectedEOF: + glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + default: + msg := "Unable to decode an event from the watch stream: %v" + if net.IsProbableEOF(err) { + glog.V(5).Infof(msg, err) + } else { + glog.Errorf(msg, err) + } + } + return + } + sw.result <- Event{ + Type: action, + Object: obj, + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/until.go b/vendor/k8s.io/apimachinery/pkg/watch/until.go new file mode 100644 index 000000000..c2772ddb5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/until.go @@ -0,0 +1,87 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "errors" + "time" + + "k8s.io/apimachinery/pkg/util/wait" +) + +// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet, +// or an error if the condition cannot be checked and should terminate. In general, it is better to define +// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed +// from false to true). +type ConditionFunc func(event Event) (bool, error) + +// ErrWatchClosed is returned when the watch channel is closed before timeout in Until. +var ErrWatchClosed = errors.New("watch closed before Until timeout") + +// Until reads items from the watch until each provided condition succeeds, and then returns the last watch +// encountered. The first condition that returns an error terminates the watch (and the event is also returned). +// If no event has been received, the returned event will be nil. +// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition. +// A zero timeout means to wait forever. 
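A sketch of Until against the FakeWatcher from watch.go, waiting for the first Modified event with a one-second budget:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	fw := watch.NewFake()
	go func() {
		fw.Add(nil)
		fw.Modify(nil)
	}()

	// The condition is checked against each event in order.
	lastEvent, err := watch.Until(time.Second, fw, func(e watch.Event) (bool, error) {
		return e.Type == watch.Modified, nil
	})
	if err != nil {
		fmt.Println("condition not met:", err)
		return
	}
	fmt.Println("satisfied by", lastEvent.Type)
}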
+func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc) (*Event, error) { + ch := watcher.ResultChan() + defer watcher.Stop() + var after <-chan time.Time + if timeout > 0 { + after = time.After(timeout) + } else { + ch := make(chan time.Time) + defer close(ch) + after = ch + } + var lastEvent *Event + for _, condition := range conditions { + // check the next condition against the previous event and short circuit waiting for the next watch + if lastEvent != nil { + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + continue + } + } + ConditionSucceeded: + for { + select { + case event, ok := <-ch: + if !ok { + return lastEvent, ErrWatchClosed + } + lastEvent = &event + + // TODO: check for watch expired error and retry watch from latest point? + done, err := condition(event) + if err != nil { + return lastEvent, err + } + if done { + break ConditionSucceeded + } + + case <-after: + return lastEvent, wait.ErrWaitTimeout + } + } + } + return lastEvent, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go new file mode 100644 index 000000000..dd49c41f9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -0,0 +1,269 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + + "github.com/golang/glog" +) + +// Interface can be implemented by anything that knows how to watch and report changes. +type Interface interface { + // Stops watching. Will close the channel returned by ResultChan(). Releases + // any resources used by the watch. + Stop() + + // Returns a chan which will receive all the events. If an error occurs + // or Stop() is called, this channel will be closed, in which case the + // watch should be completely cleaned up. + ResultChan() <-chan Event +} + +// EventType defines the possible types of events. +type EventType string + +const ( + Added EventType = "ADDED" + Modified EventType = "MODIFIED" + Deleted EventType = "DELETED" + Error EventType = "ERROR" + + DefaultChanSize int32 = 100 +) + +// Event represents a single event to a watched resource. +type Event struct { + Type EventType + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *api.Status is recommended; other types may make sense + // depending on context. + Object runtime.Object +} + +type emptyWatch chan Event + +// NewEmptyWatch returns a watch interface that returns no results and is closed. +// May be used in certain error conditions where no information is available but +// an error is not warranted. 
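NewEmptyWatch is convenient as a do-nothing return value; ranging over its channel exits immediately:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	w := watch.NewEmptyWatch()
	for range w.ResultChan() {
		// never reached: the channel is created already closed
	}
	w.Stop() // also a no-op
	fmt.Println("empty watch drained")
}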
+func NewEmptyWatch() Interface { + ch := make(chan Event) + close(ch) + return emptyWatch(ch) +} + +// Stop implements Interface +func (w emptyWatch) Stop() { +} + +// ResultChan implements Interface +func (w emptyWatch) ResultChan() <-chan Event { + return chan Event(w) +} + +// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. +type FakeWatcher struct { + result chan Event + Stopped bool + sync.Mutex +} + +func NewFake() *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event), + } +} + +func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event, size), + } +} + +// Stop implements Interface.Stop(). +func (f *FakeWatcher) Stop() { + f.Lock() + defer f.Unlock() + if !f.Stopped { + glog.V(4).Infof("Stopping fake watcher.") + close(f.result) + f.Stopped = true + } +} + +func (f *FakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + +// Reset prepares the watcher to be reused. +func (f *FakeWatcher) Reset() { + f.Lock() + defer f.Unlock() + f.Stopped = false + f.result = make(chan Event) +} + +func (f *FakeWatcher) ResultChan() <-chan Event { + return f.result +} + +// Add sends an add event. +func (f *FakeWatcher) Add(obj runtime.Object) { + f.result <- Event{Added, obj} +} + +// Modify sends a modify event. +func (f *FakeWatcher) Modify(obj runtime.Object) { + f.result <- Event{Modified, obj} +} + +// Delete sends a delete event. +func (f *FakeWatcher) Delete(lastValue runtime.Object) { + f.result <- Event{Deleted, lastValue} +} + +// Error sends an Error event. +func (f *FakeWatcher) Error(errValue runtime.Object) { + f.result <- Event{Error, errValue} +} + +// Action sends an event of the requested type, for table-based testing. +func (f *FakeWatcher) Action(action EventType, obj runtime.Object) { + f.result <- Event{action, obj} +} + +// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. +type RaceFreeFakeWatcher struct { + result chan Event + Stopped bool + sync.Mutex +} + +func NewRaceFreeFake() *RaceFreeFakeWatcher { + return &RaceFreeFakeWatcher{ + result: make(chan Event, DefaultChanSize), + } +} + +// Stop implements Interface.Stop(). +func (f *RaceFreeFakeWatcher) Stop() { + f.Lock() + defer f.Unlock() + if !f.Stopped { + glog.V(4).Infof("Stopping fake watcher.") + close(f.result) + f.Stopped = true + } +} + +func (f *RaceFreeFakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + +// Reset prepares the watcher to be reused. +func (f *RaceFreeFakeWatcher) Reset() { + f.Lock() + defer f.Unlock() + f.Stopped = false + f.result = make(chan Event, DefaultChanSize) +} + +func (f *RaceFreeFakeWatcher) ResultChan() <-chan Event { + f.Lock() + defer f.Unlock() + return f.result +} + +// Add sends an add event. +func (f *RaceFreeFakeWatcher) Add(obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Added, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Modify sends a modify event. +func (f *RaceFreeFakeWatcher) Modify(obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Modified, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Delete sends a delete event. 
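Unlike NewFake, whose channel is unbuffered, the race-free fake buffers DefaultChanSize events, so a test can send before anything is receiving; a sketch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	f := watch.NewRaceFreeFake()
	f.Add(nil)    // buffered: does not block even with no receiver yet
	f.Delete(nil)
	f.Stop() // closes the channel; buffered events remain readable

	for ev := range f.ResultChan() {
		fmt.Println(ev.Type)
	}
}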
+func (f *RaceFreeFakeWatcher) Delete(lastValue runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Deleted, lastValue}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Error sends an Error event. +func (f *RaceFreeFakeWatcher) Error(errValue runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Error, errValue}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Action sends an event of the requested type, for table-based testing. +func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{action, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go new file mode 100644 index 000000000..c70f431c2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go @@ -0,0 +1,27 @@ +package netutil + +import ( + "net/url" + "strings" +) + +// FROM: http://golang.org/src/net/http/client.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// FROM: http://golang.org/src/net/http/transport.go +// canonicalAddr returns url.Host but always with a ":port" suffix +func CanonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go new file mode 100644 index 000000000..9e45dbe1d --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go @@ -0,0 +1,388 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package reflect is a fork of go's standard library reflection package, which +// allows for deep equal with equality functions defined. +package reflect + +import ( + "fmt" + "reflect" + "strings" +) + +// Equalities is a map from type to a function comparing two values of +// that type. +type Equalities map[reflect.Type]reflect.Value + +// For convenience, panics on errrors +func EqualitiesOrDie(funcs ...interface{}) Equalities { + e := Equalities{} + if err := e.AddFuncs(funcs...); err != nil { + panic(err) + } + return e +} + +// AddFuncs is a shortcut for multiple calls to AddFunc. +func (e Equalities) AddFuncs(funcs ...interface{}) error { + for _, f := range funcs { + if err := e.AddFunc(f); err != nil { + return err + } + } + return nil +} + +// AddFunc uses func as an equality function: it must take +// two parameters of the same type, and return a boolean. 
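A sketch of wiring in a custom equality function; the Quantity type and its lenient comparison are made up for illustration:

package main

import (
	"fmt"

	forkedreflect "k8s.io/apimachinery/third_party/forked/golang/reflect"
)

// Quantity is a stand-in type for the example; it is not part of this package.
type Quantity struct {
	Raw string
}

func main() {
	eq := forkedreflect.EqualitiesOrDie(func(a, b Quantity) bool {
		// Pretend "1" and "1.0" denote the same quantity.
		return a.Raw == b.Raw || (a.Raw == "1" && b.Raw == "1.0")
	})

	fmt.Println(eq.DeepEqual(Quantity{Raw: "1"}, Quantity{Raw: "1.0"})) // true, via the custom func
	fmt.Println(eq.DeepEqual([]int{}, []int(nil)))                      // true: empty and nil slices are equal here
}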
+func (e Equalities) AddFunc(eqFunc interface{}) error { + fv := reflect.ValueOf(eqFunc) + ft := fv.Type() + if ft.Kind() != reflect.Func { + return fmt.Errorf("expected func, got: %v", ft) + } + if ft.NumIn() != 2 { + return fmt.Errorf("expected three 'in' params, got: %v", ft) + } + if ft.NumOut() != 1 { + return fmt.Errorf("expected one 'out' param, got: %v", ft) + } + if ft.In(0) != ft.In(1) { + return fmt.Errorf("expected arg 1 and 2 to have same type, but got %v", ft) + } + var forReturnType bool + boolType := reflect.TypeOf(forReturnType) + if ft.Out(0) != boolType { + return fmt.Errorf("expected bool return, got: %v", ft) + } + e[ft.In(0)] = fv + return nil +} + +// Below here is forked from go's reflect/deepequal.go + +// During deepValueEqual, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited comparisons are stored in a map indexed by visit. +type visit struct { + a1 uintptr + a2 uintptr + typ reflect.Type +} + +// unexportedTypePanic is thrown when you use this DeepEqual on something that has an +// unexported type. It indicates a programmer error, so should not occur at runtime, +// which is why it's not public and thus impossible to catch. +type unexportedTypePanic []reflect.Type + +func (u unexportedTypePanic) Error() string { return u.String() } +func (u unexportedTypePanic) String() string { + strs := make([]string, len(u)) + for i, t := range u { + strs[i] = fmt.Sprintf("%v", t) + } + return "an unexported field was encountered, nested like this: " + strings.Join(strs, " -> ") +} + +func makeUsefulPanic(v reflect.Value) { + if x := recover(); x != nil { + if u, ok := x.(unexportedTypePanic); ok { + u = append(unexportedTypePanic{v.Type()}, u...) + x = u + } + panic(x) + } +} + +// Tests for deep equality using reflected types. The map argument tracks +// comparisons that have already been seen, which allows short circuiting on +// recursive types. +func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { + defer makeUsefulPanic(v1) + + if !v1.IsValid() || !v2.IsValid() { + return v1.IsValid() == v2.IsValid() + } + if v1.Type() != v2.Type() { + return false + } + if fv, ok := e[v1.Type()]; ok { + return fv.Call([]reflect.Value{v1, v2})[0].Bool() + } + + hard := func(k reflect.Kind) bool { + switch k { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: + return true + } + return false + } + + if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { + addr1 := v1.UnsafeAddr() + addr2 := v2.UnsafeAddr() + if addr1 > addr2 { + // Canonicalize order to reduce number of entries in visited. + addr1, addr2 = addr2, addr1 + } + + // Short circuit if references are identical ... + if addr1 == addr2 { + return true + } + + // ... or already seen + typ := v1.Type() + v := visit{addr1, addr2, typ} + if visited[v] { + return true + } + + // Remember for later. + visited[v] = true + } + + switch v1.Kind() { + case reflect.Array: + // We don't need to check length here because length is part of + // an array's type, which has already been filtered for. 
+ for i := 0; i < v1.Len(); i++ { + if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Slice: + if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { + return false + } + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() != v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for i := 0; i < v1.Len(); i++ { + if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Interface: + if v1.IsNil() || v2.IsNil() { + return v1.IsNil() == v2.IsNil() + } + return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Ptr: + return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Struct: + for i, n := 0, v1.NumField(); i < n; i++ { + if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) { + return false + } + } + return true + case reflect.Map: + if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { + return false + } + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() != v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for _, k := range v1.MapKeys() { + if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { + return false + } + } + return true + case reflect.Func: + if v1.IsNil() && v2.IsNil() { + return true + } + // Can't do better than this: + return false + default: + // Normal equality suffices + if !v1.CanInterface() || !v2.CanInterface() { + panic(unexportedTypePanic{}) + } + return v1.Interface() == v2.Interface() + } +} + +// DeepEqual is like reflect.DeepEqual, but focused on semantic equality +// instead of memory equality. +// +// It will use e's equality functions if it finds types that match. +// +// An empty slice *is* equal to a nil slice for our purposes; same for maps. +// +// Unexported field members cannot be compared and will cause an imformative panic; you must add an Equality +// function for these types. +func (e Equalities) DeepEqual(a1, a2 interface{}) bool { + if a1 == nil || a2 == nil { + return a1 == a2 + } + v1 := reflect.ValueOf(a1) + v2 := reflect.ValueOf(a2) + if v1.Type() != v2.Type() { + return false + } + return e.deepValueEqual(v1, v2, make(map[visit]bool), 0) +} + +func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { + defer makeUsefulPanic(v1) + + if !v1.IsValid() || !v2.IsValid() { + return v1.IsValid() == v2.IsValid() + } + if v1.Type() != v2.Type() { + return false + } + if fv, ok := e[v1.Type()]; ok { + return fv.Call([]reflect.Value{v1, v2})[0].Bool() + } + + hard := func(k reflect.Kind) bool { + switch k { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: + return true + } + return false + } + + if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { + addr1 := v1.UnsafeAddr() + addr2 := v2.UnsafeAddr() + if addr1 > addr2 { + // Canonicalize order to reduce number of entries in visited. + addr1, addr2 = addr2, addr1 + } + + // Short circuit if references are identical ... + if addr1 == addr2 { + return true + } + + // ... or already seen + typ := v1.Type() + v := visit{addr1, addr2, typ} + if visited[v] { + return true + } + + // Remember for later. + visited[v] = true + } + + switch v1.Kind() { + case reflect.Array: + // We don't need to check length here because length is part of + // an array's type, which has already been filtered for. 
+ for i := 0; i < v1.Len(); i++ { + if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Slice: + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() > v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for i := 0; i < v1.Len(); i++ { + if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.String: + if v1.Len() == 0 { + return true + } + if v1.Len() > v2.Len() { + return false + } + return v1.String() == v2.String() + case reflect.Interface: + if v1.IsNil() { + return true + } + return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Ptr: + if v1.IsNil() { + return true + } + return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Struct: + for i, n := 0, v1.NumField(); i < n; i++ { + if !e.deepValueDerive(v1.Field(i), v2.Field(i), visited, depth+1) { + return false + } + } + return true + case reflect.Map: + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() > v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for _, k := range v1.MapKeys() { + if !e.deepValueDerive(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { + return false + } + } + return true + case reflect.Func: + if v1.IsNil() && v2.IsNil() { + return true + } + // Can't do better than this: + return false + default: + // Normal equality suffices + if !v1.CanInterface() || !v2.CanInterface() { + panic(unexportedTypePanic{}) + } + return v1.Interface() == v2.Interface() + } +} + +// DeepDerivative is similar to DeepEqual except that unset fields in a1 are +// ignored (not compared). This allows us to focus on the fields that matter to +// the semantic comparison. +// +// The unset fields include a nil pointer and an empty string. +func (e Equalities) DeepDerivative(a1, a2 interface{}) bool { + if a1 == nil { + return true + } + v1 := reflect.ValueOf(a1) + v2 := reflect.ValueOf(a2) + if v1.Type() != v2.Type() { + return false + } + return e.deepValueDerive(v1, v2, make(map[visit]bool), 0) +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go new file mode 100644 index 000000000..67957ee33 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go @@ -0,0 +1,91 @@ +//This package is copied from Go library reflect/type.go. +//The struct tag library provides no way to extract the list of struct tags, only +//a specific tag +package reflect + +import ( + "fmt" + + "strconv" + "strings" +) + +type StructTag struct { + Name string + Value string +} + +func (t StructTag) String() string { + return fmt.Sprintf("%s:%q", t.Name, t.Value) +} + +type StructTags []StructTag + +func (tags StructTags) String() string { + s := make([]string, 0, len(tags)) + for _, tag := range tags { + s = append(s, tag.String()) + } + return "`" + strings.Join(s, " ") + "`" +} + +func (tags StructTags) Has(name string) bool { + for i := range tags { + if tags[i].Name == name { + return true + } + } + return false +} + +// ParseStructTags returns the full set of fields in a struct tag in the order they appear in +// the struct tag. +func ParseStructTags(tag string) (StructTags, error) { + tags := StructTags{} + for tag != "" { + // Skip leading space. 
+ i := 0 + for i < len(tag) && tag[i] == ' ' { + i++ + } + tag = tag[i:] + if tag == "" { + break + } + + // Scan to colon. A space, a quote or a control character is a syntax error. + // Strictly speaking, control chars include the range [0x7f, 0x9f], not just + // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters + // as it is simpler to inspect the tag's bytes than the tag's runes. + i = 0 + for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { + i++ + } + if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { + break + } + name := string(tag[:i]) + tag = tag[i+1:] + + // Scan quoted string to find value. + i = 1 + for i < len(tag) && tag[i] != '"' { + if tag[i] == '\\' { + i++ + } + i++ + } + if i >= len(tag) { + break + } + qvalue := string(tag[:i+1]) + tag = tag[i+1:] + + value, err := strconv.Unquote(qvalue) + if err != nil { + return nil, err + } + tags = append(tags, StructTag{Name: name, Value: value}) + } + return tags, nil +} diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/k8s.io/apiserver/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/apiserver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apiserver/README.md b/vendor/k8s.io/apiserver/README.md new file mode 100644 index 000000000..96927ae70 --- /dev/null +++ b/vendor/k8s.io/apiserver/README.md @@ -0,0 +1,30 @@ +# apiserver + +Generic library for building a Kubernetes aggregated API server. + + +## Purpose + +This library contains code to create Kubernetes aggregation server complete with delegated authentication and authorization, +`kubectl` compatible discovery information, optional admission chain, and versioned types. It's first consumers are +`k8s.io/kubernetes`, `k8s.io/kube-aggregator`, and `github.com/kubernetes-incubator/service-catalog`. + + +## Compatibility + +There are *NO compatibility guarantees* for this repository, yet. It is in direct support of Kubernetes, so branches +will track Kubernetes and be compatible with that repo. As we more cleanly separate the layers, we will review the +compatibility guarantee. We have a goal to make this easier to use in 2017. 
+
+
+## Where does it come from?
+
+`apiserver` is synced from https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver.
+Code changes are made in that location, merged into `k8s.io/kubernetes` and later synced here.
+
+
+## Things you should *NOT* do
+
+ 1. Directly modify any files under `pkg` in this repo. Those are driven from `k8s.io/kubernetes/staging/src/k8s.io/apiserver`.
+ 2. Expect compatibility. This repo is changing quickly in direct support of
+ Kubernetes and the API isn't yet stable enough for API guarantees.
diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/doc.go b/vendor/k8s.io/apiserver/pkg/server/httplog/doc.go
new file mode 100644
index 000000000..caa6572c7
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/httplog/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package httplog contains a helper object and functions to maintain a log
+// along with an http response.
+package httplog // import "k8s.io/apiserver/pkg/server/httplog"
diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/log.go b/vendor/k8s.io/apiserver/pkg/server/httplog/log.go
new file mode 100644
index 000000000..4a4894cee
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/httplog/log.go
@@ -0,0 +1,225 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package httplog
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"net/http"
+	"runtime"
+	"time"
+
+	"github.com/golang/glog"
+)
+
+// Handler wraps all HTTP calls to delegate with nice logging.
+// delegate may use LogOf(w).Addf(...) to write additional info to
+// the per-request log message.
+//
+// Intended to wrap calls to your ServeMux.
+func Handler(delegate http.Handler, pred StacktracePred) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		defer NewLogged(req, &w).StacktraceWhen(pred).Log()
+		delegate.ServeHTTP(w, req)
+	})
+}
+
+// StacktracePred returns true if a stacktrace should be logged for this status.
+type StacktracePred func(httpStatus int) (logStacktrace bool)
+
+type logger interface {
+	Addf(format string, data ...interface{})
+}
+
+// Add a layer on top of ResponseWriter, so we can track latency and error
+// message sources.
+//
+// TODO now that we're using go-restful, we shouldn't need to be wrapping
+// the http.ResponseWriter. We can recover panics from go-restful, and
+// the logging value is questionable.
+type respLogger struct { + hijacked bool + statusRecorded bool + status int + statusStack string + addedInfo string + startTime time.Time + + captureErrorOutput bool + + req *http.Request + w http.ResponseWriter + + logStacktracePred StacktracePred +} + +// Simple logger that logs immediately when Addf is called +type passthroughLogger struct{} + +// Addf logs info immediately. +func (passthroughLogger) Addf(format string, data ...interface{}) { + glog.V(2).Info(fmt.Sprintf(format, data...)) +} + +// DefaultStacktracePred is the default implementation of StacktracePred. +func DefaultStacktracePred(status int) bool { + return (status < http.StatusOK || status >= http.StatusInternalServerError) && status != http.StatusSwitchingProtocols +} + +// NewLogged turns a normal response writer into a logged response writer. +// +// Usage: +// +// defer NewLogged(req, &w).StacktraceWhen(StatusIsNot(200, 202)).Log() +// +// (Only the call to Log() is deferred, so you can set everything up in one line!) +// +// Note that this *changes* your writer, to route response writing actions +// through the logger. +// +// Use LogOf(w).Addf(...) to log something along with the response result. +func NewLogged(req *http.Request, w *http.ResponseWriter) *respLogger { + if _, ok := (*w).(*respLogger); ok { + // Don't double-wrap! + panic("multiple NewLogged calls!") + } + rl := &respLogger{ + startTime: time.Now(), + req: req, + w: *w, + logStacktracePred: DefaultStacktracePred, + } + *w = rl // hijack caller's writer! + return rl +} + +// LogOf returns the logger hiding in w. If there is not an existing logger +// then a passthroughLogger will be created which will log to stdout immediately +// when Addf is called. +func LogOf(req *http.Request, w http.ResponseWriter) logger { + if _, exists := w.(*respLogger); !exists { + pl := &passthroughLogger{} + return pl + } + if rl, ok := w.(*respLogger); ok { + return rl + } + panic("Unable to find or create the logger!") +} + +// Unlogged returns the original ResponseWriter, or w if it is not our inserted logger. +func Unlogged(w http.ResponseWriter) http.ResponseWriter { + if rl, ok := w.(*respLogger); ok { + return rl.w + } + return w +} + +// StacktraceWhen sets the stacktrace logging predicate, which decides when to log a stacktrace. +// There's a default, so you don't need to call this unless you don't like the default. +func (rl *respLogger) StacktraceWhen(pred StacktracePred) *respLogger { + rl.logStacktracePred = pred + return rl +} + +// StatusIsNot returns a StacktracePred which will cause stacktraces to be logged +// for any status *not* in the given list. +func StatusIsNot(statuses ...int) StacktracePred { + return func(status int) bool { + for _, s := range statuses { + if status == s { + return false + } + } + return true + } +} + +// Addf adds additional data to be logged with this request. +func (rl *respLogger) Addf(format string, data ...interface{}) { + rl.addedInfo += "\n" + fmt.Sprintf(format, data...) 
+} + +// Log is intended to be called once at the end of your request handler, via defer +func (rl *respLogger) Log() { + latency := time.Since(rl.startTime) + if glog.V(2) { + if !rl.hijacked { + glog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) %v%v%v [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo, rl.req.Header["User-Agent"], rl.req.RemoteAddr)) + } else { + glog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) hijacked [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.req.Header["User-Agent"], rl.req.RemoteAddr)) + } + } +} + +// Header implements http.ResponseWriter. +func (rl *respLogger) Header() http.Header { + return rl.w.Header() +} + +// Write implements http.ResponseWriter. +func (rl *respLogger) Write(b []byte) (int, error) { + if !rl.statusRecorded { + rl.recordStatus(http.StatusOK) // Default if WriteHeader hasn't been called + } + if rl.captureErrorOutput { + rl.Addf("logging error output: %q\n", string(b)) + } + return rl.w.Write(b) +} + +// Flush implements http.Flusher even if the underlying http.Writer doesn't implement it. +// Flush is used for streaming purposes and allows to flush buffered data to the client. +func (rl *respLogger) Flush() { + if flusher, ok := rl.w.(http.Flusher); ok { + flusher.Flush() + } else if glog.V(2) { + glog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w)) + } +} + +// WriteHeader implements http.ResponseWriter. +func (rl *respLogger) WriteHeader(status int) { + rl.recordStatus(status) + rl.w.WriteHeader(status) +} + +// Hijack implements http.Hijacker. +func (rl *respLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { + rl.hijacked = true + return rl.w.(http.Hijacker).Hijack() +} + +// CloseNotify implements http.CloseNotifier +func (rl *respLogger) CloseNotify() <-chan bool { + return rl.w.(http.CloseNotifier).CloseNotify() +} + +func (rl *respLogger) recordStatus(status int) { + rl.status = status + rl.statusRecorded = true + if rl.logStacktracePred(status) { + // Only log stacks for errors + stack := make([]byte, 50*1024) + stack = stack[:runtime.Stack(stack, false)] + rl.statusStack = "\n" + string(stack) + rl.captureErrorOutput = true + } else { + rl.statusStack = "" + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go b/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go new file mode 100644 index 000000000..f01638ad6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go @@ -0,0 +1,349 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wsstream + +import ( + "encoding/base64" + "fmt" + "io" + "net/http" + "regexp" + "strings" + "time" + + "github.com/golang/glog" + "golang.org/x/net/websocket" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// The Websocket subprotocol "channel.k8s.io" prepends each binary message with a byte indicating +// the channel number (zero indexed) the message was sent on. Messages in both directions should +// prefix their messages with this channel byte. 
When used for remote execution, the channel numbers
+// are by convention defined to match the POSIX file-descriptors assigned to STDIN, STDOUT, and STDERR
+// (0, 1, and 2). No other conversion is performed on the raw subprotocol - writes are sent as they
+// are received by the server.
+//
+// Example client session:
+//
+//    CONNECT http://server.com with subprotocol "channel.k8s.io"
+//    WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN)
+//    READ  []byte{1, 10} # receive "\n" on channel 1 (STDOUT)
+//    CLOSE
+//
+const ChannelWebSocketProtocol = "channel.k8s.io"
+
+// The Websocket subprotocol "base64.channel.k8s.io" base64 encodes each message with a character
+// indicating the channel number (zero indexed) the message was sent on. Messages in both directions
+// should prefix their messages with this channel char. When used for remote execution, the channel
+// numbers are by convention defined to match the POSIX file-descriptors assigned to STDIN, STDOUT,
+// and STDERR ('0', '1', and '2'). The data received on the server is base64 decoded (and must
+// be valid) and data written by the server to the client is base64 encoded.
+//
+// Example client session:
+//
+//    CONNECT http://server.com with subprotocol "base64.channel.k8s.io"
+//    WRITE []byte{48, 90, 109, 57, 118, 67, 103, 61, 61} # send "foo\n" (base64: "Zm9vCg==") on channel '0' (STDIN)
+//    READ  []byte{49, 67, 103, 61, 61} # receive "\n" (base64: "Cg==") on channel '1' (STDOUT)
+//    CLOSE
+//
+const Base64ChannelWebSocketProtocol = "base64.channel.k8s.io"
+
+type codecType int
+
+const (
+	rawCodec codecType = iota
+	base64Codec
+)
+
+type ChannelType int
+
+const (
+	IgnoreChannel ChannelType = iota
+	ReadChannel
+	WriteChannel
+	ReadWriteChannel
+)
+
+var (
+	// connectionUpgradeRegex matches any Connection header value that includes upgrade
+	connectionUpgradeRegex = regexp.MustCompile("(^|.*,\\s*)upgrade($|\\s*,)")
+)
+
+// IsWebSocketRequest returns true if the incoming request contains connection upgrade headers
+// for WebSockets.
+func IsWebSocketRequest(req *http.Request) bool {
+	return connectionUpgradeRegex.MatchString(strings.ToLower(req.Header.Get("Connection"))) && strings.ToLower(req.Header.Get("Upgrade")) == "websocket"
+}
+
+// IgnoreReceives reads from a WebSocket until it is closed, then returns. If timeout is set, the
+// read and write deadlines are pushed every time a new message is received.
+func IgnoreReceives(ws *websocket.Conn, timeout time.Duration) {
+	defer runtime.HandleCrash()
+	var data []byte
+	for {
+		resetTimeout(ws, timeout)
+		if err := websocket.Message.Receive(ws, &data); err != nil {
+			return
+		}
+	}
+}
+
+// handshake ensures the provided user protocol matches one of the allowed protocols. It returns
+// no error if no protocol is specified.
+func handshake(config *websocket.Config, req *http.Request, allowed []string) error {
+	protocols := config.Protocol
+	if len(protocols) == 0 {
+		protocols = []string{""}
+	}
+
+	for _, protocol := range protocols {
+		for _, allow := range allowed {
+			if allow == protocol {
+				config.Protocol = []string{protocol}
+				return nil
+			}
+		}
+	}
+
+	return fmt.Errorf("requested protocol(s) are not supported: %v; supports %v", config.Protocol, allowed)
+}
+
+// ChannelProtocolConfig describes a websocket subprotocol with channels.
+type ChannelProtocolConfig struct { + Binary bool + Channels []ChannelType +} + +// NewDefaultChannelProtocols returns a channel protocol map with the +// subprotocols "", "channel.k8s.io", "base64.channel.k8s.io" and the given +// channels. +func NewDefaultChannelProtocols(channels []ChannelType) map[string]ChannelProtocolConfig { + return map[string]ChannelProtocolConfig{ + "": {Binary: true, Channels: channels}, + ChannelWebSocketProtocol: {Binary: true, Channels: channels}, + Base64ChannelWebSocketProtocol: {Binary: false, Channels: channels}, + } +} + +// Conn supports sending multiple binary channels over a websocket connection. +type Conn struct { + protocols map[string]ChannelProtocolConfig + selectedProtocol string + channels []*websocketChannel + codec codecType + ready chan struct{} + ws *websocket.Conn + timeout time.Duration +} + +// NewConn creates a WebSocket connection that supports a set of channels. Channels begin each +// web socket message with a single byte indicating the channel number (0-N). 255 is reserved for +// future use. The channel types for each channel are passed as an array, supporting the different +// duplex modes. Read and Write refer to whether the channel can be used as a Reader or Writer. +// +// The protocols parameter maps subprotocol names to ChannelProtocols. The empty string subprotocol +// name is used if websocket.Config.Protocol is empty. +func NewConn(protocols map[string]ChannelProtocolConfig) *Conn { + return &Conn{ + ready: make(chan struct{}), + protocols: protocols, + } +} + +// SetIdleTimeout sets the interval for both reads and writes before timeout. If not specified, +// there is no timeout on the connection. +func (conn *Conn) SetIdleTimeout(duration time.Duration) { + conn.timeout = duration +} + +// Open the connection and create channels for reading and writing. It returns +// the selected subprotocol, a slice of channels and an error. 
+func (conn *Conn) Open(w http.ResponseWriter, req *http.Request) (string, []io.ReadWriteCloser, error) { + go func() { + defer runtime.HandleCrash() + defer conn.Close() + websocket.Server{Handshake: conn.handshake, Handler: conn.handle}.ServeHTTP(w, req) + }() + <-conn.ready + rwc := make([]io.ReadWriteCloser, len(conn.channels)) + for i := range conn.channels { + rwc[i] = conn.channels[i] + } + return conn.selectedProtocol, rwc, nil +} + +func (conn *Conn) initialize(ws *websocket.Conn) { + negotiated := ws.Config().Protocol + conn.selectedProtocol = negotiated[0] + p := conn.protocols[conn.selectedProtocol] + if p.Binary { + conn.codec = rawCodec + } else { + conn.codec = base64Codec + } + conn.ws = ws + conn.channels = make([]*websocketChannel, len(p.Channels)) + for i, t := range p.Channels { + switch t { + case ReadChannel: + conn.channels[i] = newWebsocketChannel(conn, byte(i), true, false) + case WriteChannel: + conn.channels[i] = newWebsocketChannel(conn, byte(i), false, true) + case ReadWriteChannel: + conn.channels[i] = newWebsocketChannel(conn, byte(i), true, true) + case IgnoreChannel: + conn.channels[i] = newWebsocketChannel(conn, byte(i), false, false) + } + } + + close(conn.ready) +} + +func (conn *Conn) handshake(config *websocket.Config, req *http.Request) error { + supportedProtocols := make([]string, 0, len(conn.protocols)) + for p := range conn.protocols { + supportedProtocols = append(supportedProtocols, p) + } + return handshake(config, req, supportedProtocols) +} + +func (conn *Conn) resetTimeout() { + if conn.timeout > 0 { + conn.ws.SetDeadline(time.Now().Add(conn.timeout)) + } +} + +// Close is only valid after Open has been called +func (conn *Conn) Close() error { + <-conn.ready + for _, s := range conn.channels { + s.Close() + } + conn.ws.Close() + return nil +} + +// handle implements a websocket handler. +func (conn *Conn) handle(ws *websocket.Conn) { + defer conn.Close() + conn.initialize(ws) + + for { + conn.resetTimeout() + var data []byte + if err := websocket.Message.Receive(ws, &data); err != nil { + if err != io.EOF { + glog.Errorf("Error on socket receive: %v", err) + } + break + } + if len(data) == 0 { + continue + } + channel := data[0] + if conn.codec == base64Codec { + channel = channel - '0' + } + data = data[1:] + if int(channel) >= len(conn.channels) { + glog.V(6).Infof("Frame is targeted for a reader %d that is not valid, possible protocol error", channel) + continue + } + if _, err := conn.channels[channel].DataFromSocket(data); err != nil { + glog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) + continue + } + } +} + +// write multiplexes the specified channel onto the websocket +func (conn *Conn) write(num byte, data []byte) (int, error) { + conn.resetTimeout() + switch conn.codec { + case rawCodec: + frame := make([]byte, len(data)+1) + frame[0] = num + copy(frame[1:], data) + if err := websocket.Message.Send(conn.ws, frame); err != nil { + return 0, err + } + case base64Codec: + frame := string('0'+num) + base64.StdEncoding.EncodeToString(data) + if err := websocket.Message.Send(conn.ws, frame); err != nil { + return 0, err + } + } + return len(data), nil +} + +// websocketChannel represents a channel in a connection +type websocketChannel struct { + conn *Conn + num byte + r io.Reader + w io.WriteCloser + + read, write bool +} + +// newWebsocketChannel creates a pipe for writing to a websocket. Do not write to this pipe +// prior to the connection being opened. 
It may be no, half, or full duplex depending on +// read and write. +func newWebsocketChannel(conn *Conn, num byte, read, write bool) *websocketChannel { + r, w := io.Pipe() + return &websocketChannel{conn, num, r, w, read, write} +} + +func (p *websocketChannel) Write(data []byte) (int, error) { + if !p.write { + return len(data), nil + } + return p.conn.write(p.num, data) +} + +// DataFromSocket is invoked by the connection receiver to move data from the connection +// into a specific channel. +func (p *websocketChannel) DataFromSocket(data []byte) (int, error) { + if !p.read { + return len(data), nil + } + + switch p.conn.codec { + case rawCodec: + return p.w.Write(data) + case base64Codec: + dst := make([]byte, len(data)) + n, err := base64.StdEncoding.Decode(dst, data) + if err != nil { + return 0, err + } + return p.w.Write(dst[:n]) + } + return 0, nil +} + +func (p *websocketChannel) Read(data []byte) (int, error) { + if !p.read { + return 0, io.EOF + } + return p.r.Read(data) +} + +func (p *websocketChannel) Close() error { + return p.w.Close() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go b/vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go new file mode 100644 index 000000000..694ce81d2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package wsstream contains utilities for streaming content over WebSockets. +// The Conn type allows callers to multiplex multiple read/write channels over +// a single websocket. The Reader type allows an io.Reader to be copied over +// a websocket channel as binary content. +package wsstream // import "k8s.io/apiserver/pkg/util/wsstream" diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go b/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go new file mode 100644 index 000000000..9dd165bfa --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go @@ -0,0 +1,177 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wsstream + +import ( + "encoding/base64" + "io" + "net/http" + "sync" + "time" + + "golang.org/x/net/websocket" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// The WebSocket subprotocol "binary.k8s.io" will only send messages to the +// client and ignore messages sent to the server. The received messages are +// the exact bytes written to the stream. Zero byte messages are possible. 
+const binaryWebSocketProtocol = "binary.k8s.io"
+
+// The WebSocket subprotocol "base64.binary.k8s.io" will only send messages to the
+// client and ignore messages sent to the server. The received messages are
+// a base64 version of the bytes written to the stream. Zero byte messages are
+// possible.
+const base64BinaryWebSocketProtocol = "base64.binary.k8s.io"
+
+// ReaderProtocolConfig describes a websocket subprotocol with one stream.
+type ReaderProtocolConfig struct {
+	Binary bool
+}
+
+// NewDefaultReaderProtocols returns a stream protocol map with the
+// subprotocols "", "binary.k8s.io", "base64.binary.k8s.io".
+func NewDefaultReaderProtocols() map[string]ReaderProtocolConfig {
+	return map[string]ReaderProtocolConfig{
+		"": {Binary: true},
+		binaryWebSocketProtocol:       {Binary: true},
+		base64BinaryWebSocketProtocol: {Binary: false},
+	}
+}
+
+// Reader supports returning an arbitrary byte stream over a websocket channel.
+type Reader struct {
+	err              chan error
+	r                io.Reader
+	ping             bool
+	timeout          time.Duration
+	protocols        map[string]ReaderProtocolConfig
+	selectedProtocol string
+
+	handleCrash func() // overridable for testing
+}
+
+// NewReader creates a WebSocket pipe that will copy the contents of r to a provided
+// WebSocket connection. If ping is true, a zero length message will be sent to the client
+// before the stream begins reading.
+//
+// The protocols parameter maps subprotocol names to ReaderProtocolConfigs. The empty string
+// subprotocol name is used if websocket.Config.Protocol is empty.
+func NewReader(r io.Reader, ping bool, protocols map[string]ReaderProtocolConfig) *Reader {
+	return &Reader{
+		r:           r,
+		err:         make(chan error),
+		ping:        ping,
+		protocols:   protocols,
+		handleCrash: func() { runtime.HandleCrash() },
+	}
+}
+
+// SetIdleTimeout sets the interval for both reads and writes before timeout. If not specified,
+// there is no timeout on the reader.
+func (r *Reader) SetIdleTimeout(duration time.Duration) {
+	r.timeout = duration
+}
+
+func (r *Reader) handshake(config *websocket.Config, req *http.Request) error {
+	supportedProtocols := make([]string, 0, len(r.protocols))
+	for p := range r.protocols {
+		supportedProtocols = append(supportedProtocols, p)
+	}
+	return handshake(config, req, supportedProtocols)
+}
+
+// Copy the reader to the response. The created WebSocket is closed after this
+// method completes.
+func (r *Reader) Copy(w http.ResponseWriter, req *http.Request) error {
+	go func() {
+		defer r.handleCrash()
+		websocket.Server{Handshake: r.handshake, Handler: r.handle}.ServeHTTP(w, req)
+	}()
+	return <-r.err
+}
+
+// handle implements a WebSocket handler.
+func (r *Reader) handle(ws *websocket.Conn) {
+	// Close the connection when the client requests it, or when we finish streaming, whichever happens first
+	closeConnOnce := &sync.Once{}
+	closeConn := func() {
+		closeConnOnce.Do(func() {
+			ws.Close()
+		})
+	}
+
+	negotiated := ws.Config().Protocol
+	r.selectedProtocol = negotiated[0]
+	defer close(r.err)
+	defer closeConn()
+
+	go func() {
+		defer runtime.HandleCrash()
+		// This blocks until the connection is closed.
+		// Client should not send anything.
+ IgnoreReceives(ws, r.timeout) + // Once the client closes, we should also close + closeConn() + }() + + r.err <- messageCopy(ws, r.r, !r.protocols[r.selectedProtocol].Binary, r.ping, r.timeout) +} + +func resetTimeout(ws *websocket.Conn, timeout time.Duration) { + if timeout > 0 { + ws.SetDeadline(time.Now().Add(timeout)) + } +} + +func messageCopy(ws *websocket.Conn, r io.Reader, base64Encode, ping bool, timeout time.Duration) error { + buf := make([]byte, 2048) + if ping { + resetTimeout(ws, timeout) + if base64Encode { + if err := websocket.Message.Send(ws, ""); err != nil { + return err + } + } else { + if err := websocket.Message.Send(ws, []byte{}); err != nil { + return err + } + } + } + for { + resetTimeout(ws, timeout) + n, err := r.Read(buf) + if err != nil { + if err == io.EOF { + return nil + } + return err + } + if n > 0 { + if base64Encode { + if err := websocket.Message.Send(ws, base64.StdEncoding.EncodeToString(buf[:n])); err != nil { + return err + } + } else { + if err := websocket.Message.Send(ws, buf[:n]); err != nil { + return err + } + } + } + } +} diff --git a/vendor/k8s.io/client-go/LICENSE b/vendor/k8s.io/client-go/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/client-go/README.md b/vendor/k8s.io/client-go/README.md new file mode 100644 index 000000000..48fe02fa8 --- /dev/null +++ b/vendor/k8s.io/client-go/README.md @@ -0,0 +1,149 @@ +# client-go + +Go clients for talking to a [kubernetes](http://kubernetes.io/) cluster. + +We currently recommend using the v2.0.0 tag. See [INSTALL.md](/INSTALL.md) for +detailed installation instructions. `go get k8s.io/client-go/...` works, but +will give you head and doesn't handle the dependencies well. 
+ +[![Build Status](https://travis-ci.org/kubernetes/client-go.svg?branch=master)](https://travis-ci.org/kubernetes/client-go) +[![GoDoc](https://godoc.org/k8s.io/client-go?status.svg)](https://godoc.org/k8s.io/client-go) + +## Table of Contents + +- [What's included](#whats-included) +- [Versioning](#versioning) + - [Compatibility: your code <-> client-go](#compatibility-your-code---client-go) + - [Compatibility: client-go <-> Kubernetes clusters](#compatibility-client-go---kubernetes-clusters) + - [Compatibility matrix](#compatibility-matrix) + - [Why do the 1.4 and 1.5 branch contain top-level folder named after the version?](#why-do-the-14-and-15-branch-contain-top-level-folder-named-after-the-version) +- [How to get it](#how-to-get-it) +- [How to use it](#how-to-use-it) +- [Dependency management](#dependency-management) +- [Contributing code](#contributing-code) + +### What's included + +* The `kubernetes` package contains the clientset to access Kubernetes API. +* The `discovery` package is used to discover APIs supported by a Kubernetes API server. +* The `dynamic` package contains a dynamic client that can perform generic operations on arbitrary Kubernetes API objects. +* The `transport` package is used to set up auth and start a connection. +* The `tools/cache` package is useful for writing controllers. + +### Versioning + +`client-go` follows [semver](http://semver.org/). We will not make +backwards-incompatible changes without incrementing the major version number. A +change is backwards-incompatible either if it *i)* changes the public interfaces +of `client-go`, or *ii)* makes `client-go` incompatible with otherwise supported +versions of Kubernetes clusters. + +Changes that add features in a backwards-compatible way will result in bumping +the minor version (second digit) number. + +Bugfixes will result in the patch version (third digit) changing. PRs that are +cherry-picked into an older Kubernetes release branch will result in an update +to the corresponding branch in `client-go`, with a corresponding new tag +changing the patch version. + +A consequence of this is that `client-go` version numbers will be unrelated to +Kubernetes version numbers. + +#### Branches and tags. + +We will create a new branch and tag for each increment in the major version number or +minor version number. We will create only a new tag for each increment in the patch +version number. See [semver](http://semver.org/) for definitions of major, +minor, and patch. + +The master branch will track HEAD in the main Kubernetes repo and +accumulate changes. Consider HEAD to have the version `x.(y+1).0-alpha` or +`(x+1).0.0-alpha` (depending on whether it has accumulated a breaking change or +not), where `x` and `y` are the current major and minor versions. + +#### Compatibility: your code <-> client-go + +`client-go` follows [semver](http://semver.org/), so until the major version of +client-go gets increased, your code will compile and will continue to work with +explicitly supported versions of Kubernetes clusters. You must use a dependency +management system and pin a specific major version of `client-go` to get this +benefit, as HEAD follows the upstream Kubernetes repo. + +#### Compatibility: client-go <-> Kubernetes clusters + +Since Kubernetes is backwards compatible with clients, older `client-go` +versions will work with many different Kubernetes cluster versions. + +We will backport bugfixes--but not new features--into older versions of +`client-go`. 
+ + +#### Compatibility matrix + +| | Kubernetes 1.3 | Kubernetes 1.4 | Kubernetes 1.5 | Kubernetes 1.6 | +|---------------------|----------------|----------------|----------------|----------------| +| client-go 1.4 | + | ✓ | - | - | +| client-go 1.5 | + | + | - | - | +| client-go 2.0 | + | + | ✓ | - | +| client-go 3.0 beta | + | + | + | ✓ | +| client-go HEAD | + | + | + | + | + +Key: + +* `✓` Exactly the same features / API objects in both client-go and the Kubernetes + version. +* `+` client-go has features or api objects that may not be present in the + Kubernetes cluster, but everything they have in common will work. +* `-` The Kubernetes cluster has features the client-go library can't use + (additional API objects, etc). + +See the [CHANGELOG](./CHANGELOG.md) for a detailed description of changes +between client-go versions. + +| Branch | Canonical source code location | Maintenance status | +|----------------|--------------------------------------|-------------------------------| +| client-go 1.4 | Kubernetes main repo, 1.4 branch | = - | +| client-go 1.5 | Kubernetes main repo, 1.5 branch | = - | +| client-go 2.0 | Kubernetes main repo, 1.5 branch | ✓ | +| client-go 3.0 | Kubernetes main repo, 1.6 branch | ✓ | +| client-go HEAD | Kubernetes main repo, master branch | ✓ | + +Key: + +* `✓` Changes in main Kubernetes repo are actively published to client-go by a bot +* `=` Maintenance is manual, only severe security bugs will be patched. +* `-` Deprecated; please upgrade. + +#### Deprecation policy + +We will maintain branches for at least six months after their first stable tag +is cut. (E.g., the clock for the release-2.0 branch started ticking when we +tagged v2.0.0, not when we made the first alpha.) This policy applies to +every version greater than or equal to 2.0. + +#### Why do the 1.4 and 1.5 branch contain top-level folder named after the version? + +For the initial release of client-go, we thought it would be easiest to keep +separate directories for each minor version. That soon proved to be a mistake. +We are keeping the top-level folders in the 1.4 and 1.5 branches so that +existing users won't be broken. + +### How to get it + +You can use `go get k8s.io/client-go/...` to get client-go, but **you will get +the unstable master branch** and `client-go`'s vendored dependencies will not be +added to your `$GOPATH`. So we think most users will want to use a dependency +management system. See [INSTALL.md](/INSTALL.md) for detailed instructions. + +### How to use it + +If your application runs in a Pod in the cluster, please refer to the in-cluster [example](examples/in-cluster/main.go), otherwise please refer to the out-of-cluster [example](examples/out-of-cluster/main.go). + +### Dependency management + +If your application depends on a package that client-go depends on, and you let the Go compiler find the dependency in `GOPATH`, you will end up with duplicated dependencies: one copy from the `GOPATH`, and one from the vendor folder of client-go. This will cause unexpected runtime error like flag redefinition, since the go compiler ends up importing both packages separately, even if they are exactly the same thing. If this happens, you can either +* run `godep restore` ([godep](https://github.com/tools/godep)) in the client-go/ folder, then remove the vendor folder of client-go. Then the packages in your GOPATH will be the only copy +* or run `godep save` in your application folder to flatten all dependencies. 
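+
+For concreteness, here is a minimal sketch of the in-cluster pattern referenced under "How to use it"
+above. It is illustrative only: the import paths and the `CoreV1()` accessor follow the client-go layout
+vendored here, but treat the exact names and signatures as assumptions that can differ between client-go
+versions.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+)
+
+func main() {
+	// Build a rest.Config from the service account token and CA certificate mounted into the pod.
+	config, err := rest.InClusterConfig()
+	if err != nil {
+		panic(err)
+	}
+	// Create a typed clientset from the config.
+	clientset, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		panic(err)
+	}
+	// List pods across all namespaces as a simple smoke test of the connection.
+	pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("There are %d pods in the cluster\n", len(pods.Items))
+}
+```
+
+The out-of-cluster variant follows the same shape, except that the `rest.Config` is built from a
+kubeconfig file instead of `rest.InClusterConfig()`.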
+ +### Contributing code +Please send pull requests against the client packages in the Kubernetes main [repository](https://github.com/kubernetes/kubernetes), and run the `/staging/copy.sh` script to update the staging area in the main repository. Changes in the staging area will be published to this repository every day. diff --git a/vendor/k8s.io/client-go/pkg/api/annotation_key_constants.go b/vendor/k8s.io/client-go/pkg/api/annotation_key_constants.go new file mode 100644 index 000000000..e1080a34a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/annotation_key_constants.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file should be consistent with pkg/api/v1/annotation_key_constants.go. + +package api + +const ( + // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy + // webhook backend fails. + ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" + + // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation + PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" + + // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods + MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" + + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. + TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" + + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. + SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // CreatedByAnnotation represents the key used to store the spec(json) + // used to create the resource. + CreatedByAnnotation = "kubernetes.io/created-by" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by + // the kubelet. Pods with other sysctls will fail to launch. 
+ SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" + + // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly + // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use + // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet + // will fail to launch. + UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" + + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. + // TODO: remove when alpha support for affinity is removed + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" + + // annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" + + kubectlPrefix = "kubectl.kubernetes.io/" + + // LastAppliedConfigAnnotation is the annotation used to store the previous + // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. + LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" + + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. + AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // AnnotationValueExternalTrafficLocal Value of annotation to specify local endpoints behavior. + AnnotationValueExternalTrafficLocal = "OnlyLocal" + // AnnotationValueExternalTrafficGlobal Value of annotation to specify global (legacy) behavior. + AnnotationValueExternalTrafficGlobal = "Global" + + // TODO: The beta annotations have been deprecated, remove them when we release k8s 1.8. + + // BetaAnnotationHealthCheckNodePort Annotation specifying the healthcheck nodePort for the service. + // If not specified, annotation is created by the service api backend with the allocated nodePort. + // Will use user-specified nodePort value if specified by the client. + BetaAnnotationHealthCheckNodePort = "service.beta.kubernetes.io/healthcheck-nodeport" + + // BetaAnnotationExternalTraffic An annotation that denotes if this Service desires to route + // external traffic to local endpoints only. This preserves Source IP and avoids a second hop. + BetaAnnotationExternalTraffic = "service.beta.kubernetes.io/external-traffic" +) diff --git a/vendor/k8s.io/client-go/pkg/api/doc.go b/vendor/k8s.io/client-go/pkg/api/doc.go new file mode 100644 index 000000000..a4262ab36 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package api contains the latest (or "internal") version of the +// Kubernetes API objects. This is the API objects as represented in memory. +// The contract presented to clients is located in the versioned packages, +// which are sub-directories. The first one is "v1". Those packages +// describe how a particular version is serialized to storage/network. +package api diff --git a/vendor/k8s.io/client-go/pkg/api/field_constants.go b/vendor/k8s.io/client-go/pkg/api/field_constants.go new file mode 100644 index 000000000..5ead0f13f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/field_constants.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +// Field path constants that are specific to the internal API +// representation. +const ( + NodeUnschedulableField = "spec.unschedulable" + ObjectNameField = "metadata.name" + PodHostField = "spec.nodeName" + PodStatusField = "status.phase" + SecretTypeField = "type" + + EventReasonField = "reason" + EventSourceField = "source" + EventTypeField = "type" + EventInvolvedKindField = "involvedObject.kind" + EventInvolvedNamespaceField = "involvedObject.namespace" + EventInvolvedNameField = "involvedObject.name" + EventInvolvedUIDField = "involvedObject.uid" + EventInvolvedAPIVersionField = "involvedObject.apiVersion" + EventInvolvedResourceVersionField = "involvedObject.resourceVersion" + EventInvolvedFieldPathField = "involvedObject.fieldPath" +) diff --git a/vendor/k8s.io/client-go/pkg/api/json.go b/vendor/k8s.io/client-go/pkg/api/json.go new file mode 100644 index 000000000..3a6e04c18 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/json.go @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import "encoding/json" + +// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations +// to prevent anyone from marshaling these internal structs. 
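// Illustrative sketch, not part of the vendored sources: the field path constants in
// field_constants.go above are typically joined into a field selector string when listing
// or watching objects (e.g. "all Running pods on a node"). The constant values are repeated
// locally so this snippet stands alone; fieldSelector is a hypothetical helper.
package main

import (
	"fmt"
	"strings"
)

const (
	podHostField   = "spec.nodeName" // PodHostField above
	podStatusField = "status.phase"  // PodStatusField above
)

// fieldSelector joins key=value requirements the way a client would pass them
// in a list/watch call's fieldSelector parameter.
func fieldSelector(reqs map[string]string) string {
	parts := make([]string, 0, len(reqs))
	for k, v := range reqs {
		parts = append(parts, k+"="+v)
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(fieldSelector(map[string]string{
		podHostField:   "node-1",
		podStatusField: "Running",
	}))
}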
+ +var _ = json.Marshaler(&AvoidPods{}) +var _ = json.Unmarshaler(&AvoidPods{}) + +func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } +func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/client-go/pkg/api/objectreference.go b/vendor/k8s.io/client-go/pkg/api/objectreference.go new file mode 100644 index 000000000..b36ecab27 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/objectreference.go @@ -0,0 +1,34 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package api + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/client-go/pkg/api/register.go b/vendor/k8s.io/client-go/pkg/api/register.go new file mode 100644 index 000000000..5f261bdfb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/register.go @@ -0,0 +1,123 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "os" + + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) +var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) + +// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global +// API registry. +var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) + +// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. +// NOTE: If you are copying this file to start a new api group, STOP! Copy the +// extensions group instead. This Scheme is special and should appear ONLY in +// the api group, unless you really know what you're doing. 
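// Illustrative sketch, not part of the vendored sources: ObjectReference above stores a
// flattened APIVersion ("group/version") plus Kind, and the schema helpers split and rejoin
// that pair. This standalone snippet mirrors the split performed by
// schema.FromAPIVersionAndKind so the behaviour is visible without the vendored imports.
package main

import (
	"fmt"
	"strings"
)

// splitAPIVersion mirrors how an APIVersion field is decomposed:
// "apps/v1" -> ("apps", "v1"), "v1" -> ("", "v1") for the legacy core group.
func splitAPIVersion(apiVersion string) (group, version string) {
	if i := strings.Index(apiVersion, "/"); i >= 0 {
		return apiVersion[:i], apiVersion[i+1:]
	}
	return "", apiVersion
}

func main() {
	g, v := splitAPIVersion("v1")
	fmt.Printf("group=%q version=%q\n", g, v) // group="" version="v1"
}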
+// TODO(lavalamp): make the above error impossible. +var Scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme +var Codecs = serializer.NewCodecFactory(Scheme) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(Scheme) + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationControllerList{}, + &ReplicationController{}, + &ServiceList{}, + &Service{}, + &ServiceProxyOptions{}, + &NodeList{}, + &Node{}, + &NodeProxyOptions{}, + &Endpoints{}, + &EndpointsList{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &Secret{}, + &SecretList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + ) + + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/api/resource.go b/vendor/k8s.io/client-go/pkg/api/resource.go new file mode 100644 index 000000000..27b7fd665 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/resource.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. 
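// Illustrative sketch, not part of the vendored sources: register.go above adds every core
// type (Pod, Service, Node, ...) to a runtime.Scheme so the codec machinery can map kind
// names to Go types. This is a greatly simplified stand-in for that idea, with no real
// apimachinery involved; the object interface and scheme map are local inventions.
package main

import "fmt"

type object interface{ Kind() string }

type pod struct{}

func (pod) Kind() string { return "Pod" }

// scheme maps kind names to factory functions, which is the essence of what
// registering types with AddKnownTypes gives the serializer.
var scheme = map[string]func() object{
	"Pod": func() object { return pod{} },
}

func main() {
	obj := scheme["Pod"]()
	fmt.Println(obj.Kind())
}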
+func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) StorageOverlay() *resource.Quantity { + if val, ok := (*self)[ResourceStorageOverlay]; ok { + return &val + } + return &resource.Quantity{} +} diff --git a/vendor/k8s.io/client-go/pkg/api/taint.go b/vendor/k8s.io/client-go/pkg/api/taint.go new file mode 100644 index 000000000..173e4e601 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/taint.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package api + +import "fmt" + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} diff --git a/vendor/k8s.io/client-go/pkg/api/toleration.go b/vendor/k8s.io/client-go/pkg/api/toleration.go new file mode 100644 index 000000000..edb73b74e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/toleration.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package api + +// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by the combination of key, effect, operator and value; +// if two tolerations have the same combination, they are regarded as matching. +// TODO: uniqueness check for tolerations in api validations.
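// Illustrative sketch, not part of the vendored sources: Taint.ToString above renders
// "key=value:effect" (or "key:effect" when the value is empty), and MatchTaint compares
// only key and effect. This standalone re-statement uses a local stand-in type, not the
// vendored Taint.
package main

import "fmt"

type taint struct{ Key, Value, Effect string }

// String follows the same key[=value]:effect format as Taint.ToString.
func (t taint) String() string {
	if t.Value == "" {
		return fmt.Sprintf("%s:%s", t.Key, t.Effect)
	}
	return fmt.Sprintf("%s=%s:%s", t.Key, t.Value, t.Effect)
}

// matches mirrors MatchTaint: taints are considered unique by key and effect only.
func (t taint) matches(other taint) bool {
	return t.Key == other.Key && t.Effect == other.Effect
}

func main() {
	a := taint{Key: "dedicated", Value: "gpu", Effect: "NoSchedule"}
	b := taint{Key: "dedicated", Effect: "NoSchedule"}
	fmt.Println(a.String(), b.String(), a.matches(b)) // dedicated=gpu:NoSchedule dedicated:NoSchedule true
}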
+func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} diff --git a/vendor/k8s.io/client-go/pkg/api/types.go b/vendor/k8s.io/client-go/pkg/api/types.go new file mode 100644 index 000000000..412d6d08c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/types.go @@ -0,0 +1,3989 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// Common string formats +// --------------------- +// Many fields in this API have formatting requirements. The commonly used +// formats are defined here. +// +// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" +// in the C language. This is captured by the following regex: +// [A-Za-z_][A-Za-z0-9_]* +// This defines the format, but not the length restriction, which should be +// specified at the definition of any field of this type. +// +// DNS_LABEL: This is a string, no more than 63 characters long, that conforms +// to the definition of a "label" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])? +// +// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms +// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// or more simply: +// DNS_LABEL(\.DNS_LABEL)* +// +// IANA_SVC_NAME: This is a string, no more than 15 characters long, that +// conforms to the definition of IANA service name in RFC 6335. +// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. +// Hypens ('-') cannot be leading or trailing character of the string +// and cannot be adjacent to other hyphens. + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +type ObjectMeta struct { + // Name is unique within a namespace. Name is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // +optional + Name string + + // GenerateName indicates that the name should be made unique by the server prior to persisting + // it. A non-empty value for the field indicates the name will be made unique (and the name + // returned to the client will be different than the name passed). 
The value of this field will + // be combined with a unique suffix on the server if the Name field has not been provided. + // The provided value must be valid within the rules for Name, and may be truncated by the length + // of the suffix required to make the value unique on the server. + // + // If this field is specified, and Name is not present, the server will NOT return a 409 if the + // generated name exists - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // +optional + GenerateName string + + // Namespace defines the space within which name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // +optional + Namespace string + + // SelfLink is a URL representing this object. + // +optional + SelfLink string + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // +optional + UID types.UID + + // An opaque value that represents the version of this resource. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and values may only be valid for a particular + // resource or set of resources. Only servers will generate resource versions. + // +optional + ResourceVersion string + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // +optional + CreationTimestamp metav1.Time + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *metav1.Time + + // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion + // was requested. Represents the most recent grace period, and may only be shortened once set. + // +optional + DeletionGracePeriodSeconds *int64 + + // Labels are key value pairs that may be used to scope and select individual resources. + // Label keys are of the form: + // label-key ::= prefixed-name | name + // prefixed-name ::= prefix '/' name + // prefix ::= DNS_SUBDOMAIN + // name ::= DNS_LABEL + // The prefix is optional. If the prefix is not specified, the key is assumed to be private + // to the user. Other system components that wish to use labels must specify a prefix. The + // "kubernetes.io/" prefix is reserved for use by kubernetes components. + // +optional + Labels map[string]string + + // Annotations are unstructured key value data stored with a resource that may be set by + // external tooling. They are not queryable and should be preserved when modifying + // objects. Annotation keys have the same formatting restrictions as Label keys. See the + // comments on Labels for details. + // +optional + Annotations map[string]string + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + OwnerReferences []metav1.OwnerReference + + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + Initializers *metav1.Initializers + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + Finalizers []string + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. 
+ NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault string = "/dev/termination-log" +) + +// Volume represents a named volume in a pod that may be accessed by any containers in the pod. +type Volume struct { + // Required: This must be a DNS_LABEL. Each volume in a pod must have + // a unique name. + Name string + // The VolumeSource represents the location and type of a volume to mount. + // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + // +optional + VolumeSource +} + +// VolumeSource represents the source location of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents file or directory on the host machine that is + // directly exposed to the container. This is generally used for system + // agents or other privileged things that are allowed to see the host + // machine. Most containers will NOT need this. + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // +optional + EmptyDir *EmptyDirVolumeSource + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // GitRepo represents a git repository at a particular revision. + // +optional + GitRepo *GitRepoVolumeSource + // Secret represents a secret that should populate this volume. + // +optional + Secret *SecretVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIVolumeSource + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + // +optional + Glusterfs *GlusterfsVolumeSource + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. 
+ // +optional + FlexVolume *FlexVolumeSource + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + + // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + + // DownwardAPI represents metadata about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // +optional + StorageOS *StorageOSVolumeSource +} + +// Similar to VolumeSource but meant for the administrator who creates PVs. +// Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
+ // +optional + HostPath *HostPathVolumeSource + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + // +optional + Glusterfs *GlusterfsVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + // ISCSIVolumeSource represents an ISCSI resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIVolumeSource + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource + // Local represents directly-attached storage with node affinity + // +optional + Local *LocalVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // +optional + StorageOS *StorageOSPersistentVolumeSource +} + +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume + ClaimName string + // Optional: Defaults to false (read/write). ReadOnly here + // will force the ReadOnly setting in VolumeMounts + // +optional + ReadOnly bool +} + +const ( + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. 
+ // It's currently still used and will be held for backwards compatibility + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" + + // MountOptionAnnotation defines mount option annotation used in PVs + MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" + + // AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume. + // Value is a string of the json representation of type NodeAffinity + AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity" +) + +// +genclient=true +// +nonNamespaced=true + +type PersistentVolume struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + //Spec defines a persistent volume owned by the cluster + // +optional + Spec PersistentVolumeSpec + + // Status represents the current information about persistent volume. + // +optional + Status PersistentVolumeStatus +} + +type PersistentVolumeSpec struct { + // Resources represents the actual resources of the volume + Capacity ResourceList + // Source represents the location and type of a volume to mount. + PersistentVolumeSource + // AccessModes contains all ways the volume can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // ClaimRef is expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is + // ignored, i.e. labels of this PV do not need to match PVC selector. + // +optional + ClaimRef *ObjectReference + // Optional: what happens to a persistent volume when released from its claim. + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes +type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim + // +optional + Phase PersistentVolumePhase + // A human-readable message indicating details about why the volume is in this state. 
+ // +optional + Message string + // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI + // +optional + Reason string +} + +type PersistentVolumeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolume +} + +// +genclient=true + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the volume requested by a pod author + // +optional + Spec PersistentVolumeClaimSpec + + // Status represents the current information about a claim + // +optional + Status PersistentVolumeClaimStatus +} + +type PersistentVolumeClaimList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolumeClaim +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // Contains the types of access modes required + // +optional + AccessModes []PersistentVolumeAccessMode + // A label query over volumes to consider for binding. This selector is + // ignored when VolumeName is set + // +optional + Selector *metav1.LabelSelector + // Resources represents the minimum resources required + // +optional + Resources ResourceRequirements + // VolumeName is the binding reference to the PersistentVolume backing this + // claim. When set to non-empty value Selector is not evaluated + // +optional + VolumeName string + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 + // +optional + StorageClassName *string +} + +type PersistentVolumeClaimStatus struct { + // Phase represents the current phase of PersistentVolumeClaim + // +optional + Phase PersistentVolumeClaimPhase + // AccessModes contains all ways the volume backing the PVC can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // Represents the actual resources of the underlying volume + // +optional + Capacity ResourceList +} + +type PersistentVolumeAccessMode string + +const ( + // can be mounted read/write mode to exactly 1 host + ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" + // can be mounted in read-only mode to many hosts + ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" + // can be mounted in read/write mode to many hosts + ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" +) + +type PersistentVolumePhase string + +const ( + // used for PersistentVolumes that are not available + VolumePending PersistentVolumePhase = "Pending" + // used for PersistentVolumes that are not yet bound + // Available volumes are held by the binder and matched to PersistentVolumeClaims + VolumeAvailable PersistentVolumePhase = "Available" + // used for PersistentVolumes that are bound + VolumeBound PersistentVolumePhase = "Bound" + // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted + // released volumes must be recycled before becoming available again + // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource + VolumeReleased PersistentVolumePhase = "Released" + // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim + VolumeFailed PersistentVolumePhase = "Failed" +) + +type PersistentVolumeClaimPhase string + +const ( + // 
used for PersistentVolumeClaims that are not yet bound + ClaimPending PersistentVolumeClaimPhase = "Pending" + // used for PersistentVolumeClaims that are bound + ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +type HostPathVolumeSource struct { + Path string +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // TODO: Longer term we want to represent the selection of underlying + // media more like a scheduling problem - user says what traits they + // need, we give them a backing store that satisfies that. For now + // this will cover the most common needs. + // Optional: what type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // +optional + Medium StorageMedium + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + SizeLimit resource.Quantity +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource. Used to identify the disk in GCE + PDName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. 
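// Illustrative sketch, not part of the vendored sources: the PersistentVolumeClaim phase
// constants above are plain strings that controllers typically switch on. The type and
// values are redeclared locally so the snippet stands alone; usable is a hypothetical helper.
package main

import "fmt"

type claimPhase string

const (
	claimPending claimPhase = "Pending" // ClaimPending above
	claimBound   claimPhase = "Bound"   // ClaimBound above
	claimLost    claimPhase = "Lost"    // ClaimLost above
)

// usable reports whether a pod could rely on the claim's storage right now.
func usable(p claimPhase) bool {
	return p == claimBound
}

func main() {
	for _, p := range []claimPhase{claimPending, claimBound, claimLost} {
		fmt.Printf("%-8s usable=%v\n", p, usable(p))
	}
}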
+// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIVolumeSource struct { + // Required: iSCSI target portal + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + TargetPortal string + // Required: target iSCSI Qualified Name + // +optional + IQN string + // Required: iSCSI target lun number + // +optional + Lun int32 + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: list of iSCSI target portal ips for high availability. + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + Portals []string + // Optional: whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool + // Optional: whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool + // Optional: CHAP secret for iSCSI target and initiator authentication. + // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true + // +optional + SecretRef *LocalObjectReference +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +type FCVolumeSource struct { + // Required: FC target worldwide names (WWNs) + TargetWWNs []string + // Required: FC target lun number + Lun *int32 + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +type FlexVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: Extra driver options if any. + // +optional + Options map[string]string +} + +// Represents a Persistent Disk resource in AWS. 
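// Illustrative sketch, not part of the vendored sources: several disk-backed volume sources
// above (GCE PD, iSCSI, FC) document the same convention: FSType is implicitly inferred to
// be "ext4" when unspecified, and ReadOnly defaults to false. A minimal helper making that
// defaulting explicit, using a locally declared stand-in struct rather than the real types.
package main

import "fmt"

type diskSource struct {
	FSType   string
	ReadOnly bool
}

// effectiveFSType applies the documented default filesystem type.
func effectiveFSType(d diskSource) string {
	if d.FSType == "" {
		return "ext4"
	}
	return d.FSType
}

func main() {
	fmt.Println(effectiveFSType(diskSource{}))              // ext4
	fmt.Println(effectiveFSType(diskSource{FSType: "xfs"})) // xfs
}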
+// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique id of the persistent disk resource. Used to identify the disk in AWS + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +type GitRepoVolumeSource struct { + // Repository URL + Repository string + // Commit hash, this is optional + // +optional + Revision string + // Clone target, this is optional + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string + // TODO: Consider credentials here. +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. + // +optional + SecretName string + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. 
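// Illustrative sketch, not part of the vendored sources: for SecretVolumeSource above, when
// Items is empty every key in the Secret becomes a file named after the key; when Items is
// set, only the listed keys are projected to the given paths. A standalone rendering of that
// rule with a simplified local keyToPath type (the real KeyToPath also carries a Mode).
package main

import "fmt"

type keyToPath struct{ Key, Path string }

// projectedFiles returns the file path each secret key ends up under inside the volume.
func projectedFiles(keys []string, items []keyToPath) map[string]string {
	out := map[string]string{}
	if len(items) == 0 {
		for _, k := range keys {
			out[k] = k
		}
		return out
	}
	for _, it := range items {
		out[it.Key] = it.Path
	}
	return out
}

func main() {
	fmt.Println(projectedFiles([]string{"username", "password"}, nil))
	fmt.Println(projectedFiles([]string{"username", "password"},
		[]keyToPath{{Key: "password", Path: "creds/pw"}}))
}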
+type SecretProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server + Server string + + // Path is the exported NFS share + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the NFS export to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +type QuobyteVolumeSource struct { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + Registry string + + // Volume is a string that references an already created Quobyte volume by name. + Volume string + + // Defaults to false (read/write). ReadOnly here will force + // the Quobyte to be mounted with read-only permissions + // +optional + ReadOnly bool + + // User to map volume access to + // Defaults to the root user + // +optional + User string + + // Group to map volume access to + // Default is no group + // +optional + Group string +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string + + // Required: Path is the Glusterfs volume path + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the Glusterfs to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // Required: CephMonitors is a collection of Ceph monitors + CephMonitors []string + // Required: RBDImage is the rados image name + RBDImage string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: RadosPool is the rados pool name,default is rbd + // +optional + RBDPool string + // Optional: RBDUser is the rados user name, default is admin + // +optional + RadosUser string + // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring + // +optional + Keyring string + // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a cinder volume resource in Openstack. A Cinder volume +// must exist before mounting to a container. The volume must also be +// in the same region as the kubelet. Cinder volumes support ownership +// management and SELinux relabeling. +type CinderVolumeSource struct { + // Unique id of the volume used to identify the cinder volume + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + Monitors []string + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string + // Optional: User is the rados user name, default is admin + // +optional + User string + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // +optional + SecretFile string + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. +type FlockerVolumeSource struct { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + DatasetName string + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + DatasetUUID string +} + +// Represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +type DownwardAPIVolumeSource struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. 
+ // +optional + DefaultMode *int32 +} + +// Represents a single file containing information from the downward API +type DownwardAPIVolumeFile struct { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + Path string + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + FieldRef *ObjectFieldSelector + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +type AzureFileVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string + // Share Name + ShareName string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a vSphere volume resource. +type VsphereVirtualDiskVolumeSource struct { + // Path that identifies vSphere volume vmdk + VolumePath string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Storage Policy Based Management (SPBM) profile name. + // +optional + StoragePolicyName string + // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + // +optional + StoragePolicyID string +} + +// Represents a Photon Controller persistent disk resource. +type PhotonPersistentDiskVolumeSource struct { + // ID that identifies Photon Controller persistent disk + PdID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + FSType string +} + +// PortworxVolumeSource represents a Portworx volume resource. +type PortworxVolumeSource struct { + // VolumeID uniquely identifies a Portworx volume + VolumeID string + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. 
+ // +optional + ReadOnly bool +} + +type AzureDataDiskCachingMode string +type AzureDataDiskKind string + +const ( + AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" + AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" + AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" + + AzureSharedBlobDisk AzureDataDiskKind = "Shared" + AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated" + AzureManagedDisk AzureDataDiskKind = "Managed" +) + +// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +type AzureDiskVolumeSource struct { + // The Name of the data disk in the blob storage + DiskName string + // The URI of the data disk in the blob storage + DataDiskURI string + // Host Caching mode: None, Read Only, Read Write. + // +optional + CachingMode *AzureDataDiskCachingMode + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType *string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool + // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + Kind *AzureDataDiskKind +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +type ScaleIOVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string + // The name of the storage system as configured in ScaleIO. + System string + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *LocalObjectReference + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + ProtectionDomain string + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + StoragePool string + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + StorageMode string + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a StorageOS persistent volume resource. +type StorageOSVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created.
+ // +optional + VolumeNamespace string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *LocalObjectReference +} + +// Represents a StorageOS persistent volume resource. +type StorageOSPersistentVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *ObjectReference +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the ConfigMap or its keys must be defined + // +optional + Optional *bool +} + +// Adapts a ConfigMap into a projected volume.
+// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the ConfigMap or its keys must be defined + // +optional + Optional *bool +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection + // information about the configMap data to project + ConfigMap *ConfigMapProjection +} + +// Maps a string key to a path within a volume. +type KeyToPath struct { + // The key to project. + Key string + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. + Path string + // Optional: mode bits to use on this file, should be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 +} + +// Local represents directly-attached storage with node affinity +type LocalVolumeSource struct { + // The full path to the volume on the node + // For alpha, this path must be a directory + // Once block as a source is supported, then this path can point to a block device + Path string +} + +// ContainerPort represents a network port in a single container +type ContainerPort struct { + // Optional: If specified, this must be an IANA_SVC_NAME. Each named port + // in a pod must have a unique name. + // +optional + Name string + // Optional: If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // +optional + HostPort int32 + // Required: This must be a valid port number, 0 < x < 65536. + ContainerPort int32 + // Required: Supports "TCP" and "UDP". + // +optional + Protocol Protocol + // Optional: What host IP to bind the external port to.
+ // +optional + HostIP string +} + +// VolumeMount describes a mounting of a Volume within a container. +type VolumeMount struct { + // Required: This must match the Name of a Volume [above]. + Name string + // Optional: Defaults to false (read-write). + // +optional + ReadOnly bool + // Required. Must not contain ':'. + MountPath string + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + SubPath string +} + +// EnvVar represents an environment variable present in a Container. +type EnvVar struct { + // Required: This must be a C_IDENTIFIER. + Name string + // Optional: no more than one of the following may be specified. + // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded + // using the previously defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // +optional + Value string + // Optional: Specifies a source the value of this var should come from. + // +optional + ValueFrom *EnvVarSource +} + +// EnvVarSource represents a source for the value of an EnvVar. +// Only one of its fields may be set. +type EnvVarSource struct { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. + // +optional + FieldRef *ObjectFieldSelector + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector + // Selects a key of a ConfigMap. + // +optional + ConfigMapKeyRef *ConfigMapKeySelector + // Selects a key of a secret in the pod's namespace. + // +optional + SecretKeyRef *SecretKeySelector +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +type ObjectFieldSelector struct { + // Required: Version of the schema the FieldPath is written in terms of. + // If no value is specified, it will be defaulted to the APIVersion of the + // enclosing object. + APIVersion string + // Required: Path of the field to select in the specified API version + FieldPath string +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + // +optional + ContainerName string + // Required: resource to select + Resource string + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + Divisor resource.Quantity +} + +// Selects a key from a ConfigMap. +type ConfigMapKeySelector struct { + // The ConfigMap to select from. + LocalObjectReference + // The key to select. + Key string + // Specify whether the ConfigMap or its key must be defined + // +optional + Optional *bool +} + +// SecretKeySelector selects a key of a Secret. +type SecretKeySelector struct { + // The name of the secret in the pod's namespace to select from. + LocalObjectReference + // The key of the secret to select from. Must be a valid secret key.
+ Key string + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + Prefix string + // The ConfigMap to select from. + //+optional + ConfigMapRef *ConfigMapEnvSource + // The Secret to select from. + //+optional + SecretRef *SecretEnvSource +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference + // Specify whether the Secret must be defined + // +optional + Optional *bool +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string + // The header field value + Value string +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Optional: Path to access on the HTTP server. + // +optional + Path string + // Required: Name or number of the port to access on the container. + // +optional + Port intstr.IntOrString + // Optional: Host name to connect to, defaults to the pod IP. You + // probably want to set "Host" in httpHeaders instead. + // +optional + Host string + // Optional: Scheme to use for connecting to the host, defaults to HTTP. + // +optional + Scheme URIScheme + // Optional: Custom headers to set in the request. HTTP allows repeated headers. + // +optional + HTTPHeaders []HTTPHeader +} + +// URIScheme identifies the scheme used for connection to a host for Get actions +type URIScheme string + +const ( + // URISchemeHTTP means that the scheme used will be http:// + URISchemeHTTP URIScheme = "HTTP" + // URISchemeHTTPS means that the scheme used will be https:// + URISchemeHTTPS URIScheme = "HTTPS" +) + +// TCPSocketAction describes an action based on opening a socket +type TCPSocketAction struct { + // Required: Port to connect to. + // +optional + Port intstr.IntOrString + // Optional: Host name to connect to, defaults to the pod IP. + // +optional + Host string +} + +// ExecAction describes a "run in container" action. +type ExecAction struct { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // +optional + Command []string +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // The action taken to determine the health of a container + Handler + // Length of time before health checking is activated. In seconds.
+ // +optional + InitialDelaySeconds int32 + // Length of time before health checking times out. In seconds. + // +optional + TimeoutSeconds int32 + // How often (in seconds) to perform the probe. + // +optional + PeriodSeconds int32 + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Must be 1 for liveness. + // +optional + SuccessThreshold int32 + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // +optional + FailureThreshold int32 +} + +// PullPolicy describes a policy for if/when to pull a container image +type PullPolicy string + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails. + PullAlways PullPolicy = "Always" + // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present + PullNever PullPolicy = "Never" + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. + PullIfNotPresent PullPolicy = "IfNotPresent" +) + +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. + TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. + TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + +// Capability represents a POSIX capabilities type +type Capability string + +// Capabilities represent POSIX capabilities that can be added to or removed from a running container. +type Capabilities struct { + // Added capabilities + // +optional + Add []Capability + // Removed capabilities + // +optional + Drop []Capability +} + +// ResourceRequirements describes the compute resource requirements. +type ResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // +optional + Limits ResourceList + // Requests describes the minimum amount of compute resources required. + // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value + // +optional + Requests ResourceList +} + +// Container represents a single container that is expected to be run on the host. +type Container struct { + // Required: This must be a DNS_LABEL. Each container in a pod must + // have a unique name. + Name string + // Required. + Image string + // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // +optional + Command []string + // Optional: The docker image's cmd is used if this is not provided; cannot be updated.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // +optional + Args []string + // Optional: Defaults to Docker's default. + // +optional + WorkingDir string + // +optional + Ports []ContainerPort + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource + // +optional + Env []EnvVar + // Compute resource requirements. + // +optional + Resources ResourceRequirements + // +optional + VolumeMounts []VolumeMount + // +optional + LivenessProbe *Probe + // +optional + ReadinessProbe *Probe + // +optional + Lifecycle *Lifecycle + // Required. + // +optional + TerminationMessagePath string + // +optional + TerminationMessagePolicy TerminationMessagePolicy + // Required: Policy for pulling images for this container + ImagePullPolicy PullPolicy + // Optional: SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // +optional + SecurityContext *SecurityContext + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + // +optional + Stdin bool + // +optional + StdinOnce bool + // +optional + TTY bool +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + Exec *ExecAction + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction + // TCPSocket specifies an action involving a TCP port. + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +type Lifecycle struct { + // PostStart is called immediately after a container is created. If the handler fails, the container + // is terminated and restarted. + // +optional + PostStart *Handler + // PreStop is called immediately before a container is terminated. The reason for termination is + // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. + // +optional + PreStop *Handler +} + +// The below types are used by kube_client and api_server. + +type ConditionStatus string + +// These are valid condition statuses. 
"ConditionTrue" means a resource is in the condition; +// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +type ContainerStateWaiting struct { + // A brief CamelCase string indicating details about why the container is in waiting state. + // +optional + Reason string + // A human-readable message indicating details about why the container is in waiting state. + // +optional + Message string +} + +type ContainerStateRunning struct { + // +optional + StartedAt metav1.Time +} + +type ContainerStateTerminated struct { + ExitCode int32 + // +optional + Signal int32 + // +optional + Reason string + // +optional + Message string + // +optional + StartedAt metav1.Time + // +optional + FinishedAt metav1.Time + // +optional + ContainerID string +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // +optional + Waiting *ContainerStateWaiting + // +optional + Running *ContainerStateRunning + // +optional + Terminated *ContainerStateTerminated +} + +type ContainerStatus struct { + // Each container in a pod must have a unique name. + Name string + // +optional + State ContainerState + // +optional + LastTerminationState ContainerState + // Ready specifies whether the container has passed its readiness check. + Ready bool + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + RestartCount int32 + Image string + ImageID string + // +optional + ContainerID string +} + +// PodPhase is a label for the condition of a pod at the current time. +type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +type PodConditionType string + +// These are valid conditions of pod. +const ( + // PodScheduled represents status of the scheduling process for this pod. 
+ PodScheduled PodConditionType = "PodScheduled" + // PodReady means the pod is able to service requests and should be added to the + // load balancing pools of all matching services. + PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" + // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler + // can't schedule the pod right now, for example due to insufficient resources in the cluster. + PodReasonUnschedulable = "Unschedulable" +) + +type PodCondition struct { + Type PodConditionType + Status ConditionStatus + // +optional + LastProbeTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +// RestartPolicy describes how the container should be restarted. +// Only one of the following restart policies may be specified. +// If none of the following policies is specified, the default one +// is RestartPolicyAlways. +type RestartPolicy string + +const ( + RestartPolicyAlways RestartPolicy = "Always" + RestartPolicyOnFailure RestartPolicy = "OnFailure" + RestartPolicyNever RestartPolicy = "Never" +) + +// PodList is a list of Pods. +type PodList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Pod +} + +// DNSPolicy defines how a pod's DNS will be configured. +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + + // DNSClusterFirst indicates that the pod should use cluster DNS + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. + DNSClusterFirst DNSPolicy = "ClusterFirst" + + // DNSDefault indicates that the pod should use the default (as + // determined by kubelet) DNS settings. + DNSDefault DNSPolicy = "Default" +) + +// A node selector represents the union of the results of one or more label queries +// over a set of nodes; that is, it represents the OR of the selectors represented +// by the node selector terms. +type NodeSelector struct { + // Required. A list of node selector terms. The terms are ORed. + NodeSelectorTerms []NodeSelectorTerm +} + +// A null or empty node selector term matches no objects. +type NodeSelectorTerm struct { + // Required. A list of node selector requirements. The requirements are ANDed. + MatchExpressions []NodeSelectorRequirement +} + +// A node selector requirement is a selector that contains values, a key, and an operator +// that relates the key and values. +type NodeSelectorRequirement struct { + // The label key that the selector applies to. + Key string + // Represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + Operator NodeSelectorOperator + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch.
+ // +optional + Values []string +} + +// A node selector operator is the set of operators that can be used in +// a node selector requirement. +type NodeSelectorOperator string + +const ( + NodeSelectorOpIn NodeSelectorOperator = "In" + NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" + NodeSelectorOpExists NodeSelectorOperator = "Exists" + NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" + NodeSelectorOpGt NodeSelectorOperator = "Gt" + NodeSelectorOpLt NodeSelectorOperator = "Lt" +) + +// Affinity is a group of affinity scheduling rules. +type Affinity struct { + // Describes node affinity scheduling rules for the pod. + // +optional + NodeAffinity *NodeAffinity + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAffinity *PodAffinity + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAntiAffinity *PodAntiAffinity +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. 
+ // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int32 + // Required. A pod affinity term, associated with the corresponding weight. + PodAffinityTerm PodAffinityTerm +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key <topologyKey> matches that of any node on which +// a pod of the set of pods is running. +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + // +optional + LabelSelector *metav1.LabelSelector + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + Namespaces []string + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running.
+ // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + // +optional + TopologyKey string +} + +// Node affinity is a group of node affinity scheduling rules. +type NodeAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // will try to eventually evict the pod from its node. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +type PreferredSchedulingTerm struct { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + Weight int32 + // A node selector term, associated with the corresponding weight. + Preference NodeSelectorTerm +} + +// The node this Taint is attached to has the effect "effect" on +// any pod that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + Key string + // Required. The taint value corresponding to the taint key. + // +optional + Value string + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + Effect TaintEffect + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + TimeAdded metav1.Time +} + +type TaintEffect string + +const ( + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // but allow all pods submitted to Kubelet without going through the scheduler + // to start, and allow all already-running pods to continue running. + // Enforced by the scheduler.
+ TaintEffectNoSchedule TaintEffect = "NoSchedule" + // Like TaintEffectNoSchedule, but the scheduler tries not to schedule + // new pods onto the node, rather than prohibiting new pods from scheduling + // onto the node entirely. Enforced by the scheduler. + TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to + // Kubelet without going through the scheduler to start. + // Enforced by Kubelet and the scheduler. + // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" + // Evict any already-running pods that do not tolerate the taint. + // Currently enforced by NodeController. + TaintEffectNoExecute TaintEffect = "NoExecute" +) + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple <key,value,effect> using the matching operator <operator>. +type Toleration struct { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. + // +optional + Key string + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + // +optional + Operator TolerationOperator + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + // +optional + Value string + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + Effect TaintEffect + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + TolerationSeconds *int64 +} + +// A toleration operator is the set of operators that can be used in a toleration. +type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + +// PodSpec is a description of a pod +type PodSpec struct { + Volumes []Volume + // List of initialization containers belonging to the pod. + InitContainers []Container + // List of containers belonging to the pod. + Containers []Container + // +optional + RestartPolicy RestartPolicy + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process.
+ // +optional + TerminationGracePeriodSeconds *int64 + // Optional duration in seconds relative to the StartTime that the pod may be active on a node + // before the system actively tries to terminate the pod; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 + // Required: Set DNS policy. + // +optional + DNSPolicy DNSPolicy + // NodeSelector is a selector which must be true for the pod to fit on a node + // +optional + NodeSelector map[string]string + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod + // The pod will be allowed to use secrets referenced by the ServiceAccount + ServiceAccountName string + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *PodSecurityContext + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // +optional + ImagePullSecrets []LocalObjectReference + // Specifies the hostname of the Pod. + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + Hostname string + // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + // If not specified, the pod will not have a domainname at all. + // +optional + Subdomain string + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string + // If specified, the pod's tolerations. + // +optional + Tolerations []Toleration + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. + // +optional + HostAliases []HostAlias +} + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +type HostAlias struct { + IP string + Hostnames []string +} + +// Sysctl defines a kernel parameter to be set +type Sysctl struct { + // Name of a property to set + Name string + // Value of a property to set + Value string +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +type PodSecurityContext struct { + // Use the host's network namespace. If this option is set, the ports that will be + // used must be specified. + // Optional: Default to false + // +k8s:conversion-gen=false + // +optional + HostNetwork bool + // Use the host's pid namespace. + // Optional: Default to false.
+ // +k8s:conversion-gen=false + // +optional + HostPID bool + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsUser *types.UnixUserID + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + SupplementalGroups []types.UnixGroupID + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + FSGroup *types.UnixGroupID +} + +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. +type PodStatus struct { + // +optional + Phase PodPhase + // +optional + Conditions []PodCondition + // A human readable message indicating details about why the pod is in this state. + // +optional + Message string + // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk' + // +optional + Reason string + + // +optional + HostIP string + // +optional + PodIP string + + // Date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + StartTime *metav1.Time + // +optional + QOSClass PodQOSClass + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status + InitContainerStatuses []ContainerStatus + // The list has one entry per container in the manifest. Each entry is + // currently the output of `docker inspect`. This output format is *not* + // final and should not be relied upon. + // TODO: Make real decisions about what our info should look like. Re-enable fuzz test + // when we have done this. + // +optional + ContainerStatuses []ContainerStatus +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +type PodStatusResult struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// +genclient=true + +// Pod is a collection of containers, used as either input (create, update) or as output (list, get). +type Pod struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec + + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// PodTemplateSpec describes the data a pod should have when created from a template +type PodTemplateSpec struct { + // Metadata of the pods created from this template. + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec +} + +// +genclient=true + +// PodTemplate describes a template for creating copies of a predefined pod. +type PodTemplate struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Template defines the pods that will be created from this pod template + // +optional + Template PodTemplateSpec +} + +// PodTemplateList is a list of PodTemplates. +type PodTemplateList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodTemplate +} + +// ReplicationControllerSpec is the specification of a replication controller. +// As the internal representation of a replication controller, it may have either +// a TemplateRef or a Template set. +type ReplicationControllerSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the Replicas count. + Selector map[string]string + + // TemplateRef is a reference to an object that describes the pod that will be created if + // insufficient replicas are detected. This reference is ignored if a Template is set. + // Must be set before converting to a versioned API object + // +optional + //TemplateRef *ObjectReference + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Internally, this takes precedence over a + // TemplateRef. + // +optional + Template *PodTemplateSpec +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +type ReplicationControllerStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replication controller. 
+ // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replication controller. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replication controller's current state. + // +optional + Conditions []ReplicationControllerCondition +} + +type ReplicationControllerConditionType string + +// These are valid conditions of a replication controller. +const ( + // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods + // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, + // etc. or deleted due to kubelet being down or finalizers are failing. + ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" +) + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +type ReplicationControllerCondition struct { + // Type of replication controller condition. + Type ReplicationControllerConditionType + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + // +optional + Reason string + // A human readable message indicating details about the transition. + // +optional + Message string +} + +// +genclient=true + +// ReplicationController represents the configuration of a replication controller. +type ReplicationController struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this replication controller. + // +optional + Spec ReplicationControllerSpec + + // Status is the current status of this replication controller. This data may be + // out of date by some window of time. + // +optional + Status ReplicationControllerStatus +} + +// ReplicationControllerList is a collection of replication controllers. +type ReplicationControllerList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicationController +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// ServiceList holds a list of services. +type ServiceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Service +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. + ServiceAffinityNone ServiceAffinity = "None" +) + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the ClusterIP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. 
+ ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// Service External Traffic Policy Type string +type ServiceExternalTrafficPolicyType string + +const ( + // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. + ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + // ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior. + ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" +) + +// ServiceStatus represents the current status of a service +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus +} + +// LoadBalancerStatus represents the status of a load-balancer +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer; + // traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string +} + +// ServiceSpec describes the attributes that a user creates on a service +type ServiceSpec struct { + // Type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + Type ServiceType + + // Required: The list of ports that are exposed by this service. + Ports []ServicePort + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. 
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ Selector map[string]string
+
+ // ClusterIP is the IP address of the service and is usually assigned
+ // randomly by the master. If an address is specified manually and is not in
+ // use by others, it will be allocated to the service; otherwise, creation
+ // of the service will fail. This field can not be changed through updates.
+ // Valid values are "None", empty string (""), or a valid IP address. "None"
+ // can be specified for headless services when proxying is not required.
+ // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
+ // type is ExternalName.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ // +optional
+ ClusterIP string
+
+ // ExternalName is the external reference that kubedns or equivalent will
+ // return as a CNAME record for this service. No proxying will be involved.
+ // Must be a valid DNS name and requires Type to be ExternalName.
+ ExternalName string
+
+ // ExternalIPs are used by external load balancers, or can be set by
+ // users to handle external traffic that arrives at a node.
+ // +optional
+ ExternalIPs []string
+
+ // Only applies to Service Type: LoadBalancer
+ // LoadBalancer will get created with the IP specified in this field.
+ // This feature depends on whether the underlying cloud-provider supports specifying
+ // the loadBalancerIP when a load balancer is created.
+ // This field will be ignored if the cloud-provider does not support the feature.
+ // +optional
+ LoadBalancerIP string
+
+ // Optional: Supports "ClientIP" and "None". Used to maintain session affinity.
+ // +optional
+ SessionAffinity ServiceAffinity
+
+ // Optional: If specified and supported by the platform, traffic through the cloud-provider
+ // load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ // cloud-provider does not support the feature.
+ // +optional
+ LoadBalancerSourceRanges []string
+
+ // externalTrafficPolicy denotes if this Service desires to route external
+ // traffic to node-local or cluster-wide endpoints. "Local" preserves the
+ // client source IP and avoids a second hop for LoadBalancer and NodePort
+ // type services, but risks potentially imbalanced traffic spreading.
+ // "Cluster" obscures the client source IP and may cause a second hop to
+ // another node, but should have good overall load-spreading.
+ // +optional
+ ExternalTrafficPolicy ServiceExternalTrafficPolicyType
+
+ // healthCheckNodePort specifies the healthcheck nodePort for the service.
+ // If not specified, HealthCheckNodePort is created by the service api
+ // backend with the allocated nodePort. Will use user-specified nodePort value
+ // if specified by the client. Only takes effect when Type is set to LoadBalancer
+ // and ExternalTrafficPolicy is set to Local.
+ // +optional
+ HealthCheckNodePort int32
+}
+
+type ServicePort struct {
+ // Optional if only one ServicePort is defined on this service: The
+ // name of this port within the service. This must be a DNS_LABEL.
+ // All ports within a ServiceSpec must have unique names. This maps to
+ // the 'Name' field in EndpointPort objects.
+ Name string
+
+ // The IP protocol for this port. Supports "TCP" and "UDP".
+ Protocol Protocol
+
+ // The port that will be exposed on the service.
+ Port int32
+
+ // Optional: The target port on pods selected by this service.
If this + // is a string, it will be looked up as a named port in the target + // Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + TargetPort intstr.IntOrString + + // The port on each node on which this service is exposed. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + NodePort int32 +} + +// +genclient=true + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a service. + // +optional + Spec ServiceSpec + + // Status represents the current status of a service. + // +optional + Status ServiceStatus +} + +// +genclient=true + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount + Secrets []ObjectReference + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // +optional + ImagePullSecrets []LocalObjectReference + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool +} + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ServiceAccount +} + +// +genclient=true + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // The set of all endpoints is the union of all subsets. + Subsets []EndpointSubset +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +type EndpointSubset struct { + Addresses []EndpointAddress + NotReadyAddresses []EndpointAddress + Ports []EndpointPort +} + +// EndpointAddress is a tuple that describes single IP address. +type EndpointAddress struct { + // The IP of this endpoint. 
+ // IPv6 is also accepted but not fully supported on all platforms. Also, certain
+ // kubernetes components, like kube-proxy, are not IPv6 ready.
+ // TODO: This should allow hostname or IP, see #4447.
+ IP string
+ // Optional: Hostname of this endpoint
+ // Meant to be used by DNS servers etc.
+ // +optional
+ Hostname string
+ // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+ // +optional
+ NodeName *string
+ // Optional: The kubernetes object related to the entry point.
+ TargetRef *ObjectReference
+}
+
+// EndpointPort is a tuple that describes a single port.
+type EndpointPort struct {
+ // The name of this port (corresponds to ServicePort.Name). Optional
+ // if only one port is defined. Must be a DNS_LABEL.
+ Name string
+
+ // The port number.
+ Port int32
+
+ // The IP protocol for this port.
+ Protocol Protocol
+}
+
+// EndpointsList is a list of endpoints.
+type EndpointsList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []Endpoints
+}
+
+// NodeSpec describes the attributes that a node is created with.
+type NodeSpec struct {
+ // PodCIDR represents the pod IP range assigned to the node
+ // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs.
+ // +optional
+ PodCIDR string
+
+ // External ID of the node assigned by some machine database (e.g. a cloud provider)
+ // +optional
+ ExternalID string
+
+ // ID of the node assigned by the cloud provider
+ // Note: format is "<ProviderName>://<ProviderSpecificNodeID>"
+ // +optional
+ ProviderID string
+
+ // Unschedulable controls node schedulability of new pods. By default node is schedulable.
+ // +optional
+ Unschedulable bool
+
+ // If specified, the node's taints.
+ // +optional
+ Taints []Taint
+}
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+type DaemonEndpoint struct {
+ /*
+ The port tag was not properly in quotes in earlier releases, so it must be
+ uppercased for backwards compat (since it was falling back to var name of
+ 'Port').
+ */
+
+ // Port number of the given endpoint.
+ Port int32
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+type NodeDaemonEndpoints struct {
+ // Endpoint on which Kubelet is listening.
+ // +optional
+ KubeletEndpoint DaemonEndpoint
+}
+
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
+type NodeSystemInfo struct {
+ // MachineID reported by the node. For unique machine identification
+ // in the cluster this field is preferred. Learn more from man(5)
+ // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
+ MachineID string
+ // SystemUUID reported by the node. For unique machine identification
+ // MachineID is preferred. This field is specific to Red Hat hosts
+ // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
+ SystemUUID string
+ // Boot ID reported by the node.
+ BootID string
+ // Kernel Version reported by the node.
+ KernelVersion string
+ // OS Image reported by the node.
+ OSImage string
+ // ContainerRuntime Version reported by the node.
+ ContainerRuntimeVersion string
+ // Kubelet Version reported by the node.
+ KubeletVersion string
+ // KubeProxy Version reported by the node.
+ KubeProxyVersion string
+ // The Operating System reported by the node
+ OperatingSystem string
+ // The Architecture reported by the node
+ Architecture string
+}
+
+// NodeStatus is information about the current status of a node.
+type NodeStatus struct { + // Capacity represents the total resources of a node. + // +optional + Capacity ResourceList + // Allocatable represents the resources of a node that are available for scheduling. + // +optional + Allocatable ResourceList + // NodePhase is the current lifecycle phase of the node. + // +optional + Phase NodePhase + // Conditions is an array of current node conditions. + // +optional + Conditions []NodeCondition + // Queried from cloud provider, if available. + // +optional + Addresses []NodeAddress + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints + // Set of ids/uuids to uniquely identify the node. + // +optional + NodeInfo NodeSystemInfo + // List of container images on this node + // +optional + Images []ContainerImage + // List of attachable volumes in use (mounted) by the node. + // +optional + VolumesInUse []UniqueVolumeName + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName + + // DevicePath represents the device path where the volume should be available + DevicePath string +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time + // (brief) reason why this entry was added to the list. + // +optional + Reason string + // Human readable message indicating why this entry was added to the list. + // +optional + Message string +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + Names []string + // The size of the image in bytes. + // +optional + SizeBytes int64 +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReady, NodeReachable +const ( + // NodeReady means kubelet is healthy and ready to accept pods. 
+ NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" +) + +type NodeCondition struct { + Type NodeConditionType + Status ConditionStatus + // +optional + LastHeartbeatTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +type NodeAddressType string + +const ( + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +type NodeAddress struct { + Type NodeAddressType + Address string +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + // +optional + Capacity ResourceList +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. +const ( + // CPU, in cores. (500m = .5 cores) + ResourceCPU ResourceName = "cpu" + // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceMemory ResourceName = "memory" + // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) + ResourceStorage ResourceName = "storage" + // Local Storage for overlay filesystem, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // The resource name for ResourceStorageOverlay is alpha and it can change across releases. + ResourceStorageOverlay ResourceName = "storage.kubernetes.io/overlay" + // Local Storage for scratch space, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // The resource name for ResourceStorageScratch is alpha and it can change across releases. + ResourceStorageScratch ResourceName = "storage.kubernetes.io/scratch" + // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. + ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" + // Number of Pods that may be running on this Node: see ResourcePods +) + +const ( + // Namespace prefix for opaque counted resources (alpha). + ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" +) + +// ResourceList is a set of (resource name, quantity) pairs. 
+type ResourceList map[ResourceName]resource.Quantity + +// +genclient=true +// +nonNamespaced=true + +// Node is a worker node in Kubernetes +// The name of the node according to etcd is in ObjectMeta.Name. +type Node struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a node. + // +optional + Spec NodeSpec + + // Status describes the current status of a Node + // +optional + Status NodeStatus +} + +// NodeList is a list of nodes. +type NodeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Node +} + +// NamespaceSpec describes the attributes on a Namespace +type NamespaceSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []FinalizerName +} + +// FinalizerName is the name identifying a finalizer during namespace lifecycle. +type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. +const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // +optional + Phase NamespacePhase +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +// +genclient=true +// +nonNamespaced=true + +// A namespace provides a scope for Names. +// Use of multiple namespaces is optional +type Namespace struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of the Namespace. + // +optional + Spec NamespaceSpec + + // Status describes the current status of a Namespace + // +optional + Status NamespaceStatus +} + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Namespace +} + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. +type Binding struct { + metav1.TypeMeta + // ObjectMeta describes the object that is being bound. + // +optional + metav1.ObjectMeta + + // Target is the object to bind to. + Target ObjectReference +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID +} + +// DeletionPropagation decides whether and how garbage collection will be performed. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. + // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. + // This policy is cascading, i.e., the dependents will be deleted with Foreground. 
+ DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +type DeleteOptions struct { + metav1.TypeMeta + + // Optional duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // +optional + GracePeriodSeconds *int64 + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation +} + +// ListOptions is the query options to a standard REST list call, and has future support for +// watch calls. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +type ListOptions struct { + metav1.TypeMeta + + // A selector based on labels + LabelSelector labels.Selector + // A selector based on fields + FieldSelector fields.Selector + + // If true, partially initialized resources are included in the response. + IncludeUninitialized bool + + // If true, watch for changes to this list + Watch bool + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string + // Timeout for the list/watch call. + TimeoutSeconds *int64 +} + +// PodLogOptions is the query options for a Pod's logs REST call +type PodLogOptions struct { + metav1.TypeMeta + + // Container for which to return logs + Container string + // If true, follow the logs for the pod + Follow bool + // If true, return previous terminated container logs + Previous bool + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. 
+ Timestamps bool + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 +} + +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the attach call + // +optional + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the attach call + // +optional + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the attach call + // +optional + Stderr bool + + // TTY if true indicates that a tty will be allocated for the attach call + // +optional + TTY bool + + // Container to attach to. + // +optional + Container string +} + +// PodExecOptions is the query options to a Pod's remote exec call +type PodExecOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the exec call + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the exec call + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the exec call + Stderr bool + + // TTY if true indicates that a tty will be allocated for the exec call + TTY bool + + // Container in which to execute the command. + Container string + + // Command is the remote command to execute; argv array; not executed within a shell. + Command []string +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +type PodPortForwardOptions struct { + metav1.TypeMeta + + // The list of ports to forward + // +optional + Ports []int32 +} + +// PodProxyOptions is the query options to a Pod's proxy call +type PodProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// NodeProxyOptions is the query options to a Node's proxy call +type NodeProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + Path string +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // +optional + Kind string + // +optional + Namespace string + // +optional + Name string + // +optional + UID types.UID + // +optional + APIVersion string + // +optional + ResourceVersion string + + // Optional. If referring to a piece of an object instead of an entire object, this string + // should contain information to identify the sub-object. 
For example, if the object + // reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string +} + +// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +type LocalObjectReference struct { + //TODO: Add other useful fields. apiVersion, kind, uid? + Name string +} + +type SerializedReference struct { + metav1.TypeMeta + // +optional + Reference ObjectReference +} + +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string + // Node name on which the event is generated. + // +optional + Host string +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient=true + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Required. The object that this event is about. + // +optional + InvolvedObject ObjectReference + + // Optional; this should be a short, machine understandable string that gives the reason + // for this event being generated. For example, if the event is reporting that a container + // can't start, the Reason might be "ImageNotFound". + // TODO: provide exact specification for format. + // +optional + Reason string + + // Optional. A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + Message string + + // Optional. The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time + + // The number of times this event has occurred. + // +optional + Count int32 + + // Type of this event (Normal, Warning), new types could be added in the future. + // +optional + Type string +} + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Event +} + +// List holds a list of objects, which may not be known by the server. 
+type List struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []runtime.Object +} + +// A type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind +type LimitRangeItem struct { + // Type of resource that this limit applies to + // +optional + Type LimitType + // Max usage constraints on this kind by resource name + // +optional + Max ResourceList + // Min usage constraints on this kind by resource name + // +optional + Min ResourceList + // Default resource requirement limit value by resource name. + // +optional + Default ResourceList + // DefaultRequest resource requirement request value by resource name. + // +optional + DefaultRequest ResourceList + // MaxLimitRequestRatio represents the max burst value for the named resource + // +optional + MaxLimitRequestRatio ResourceList +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced + Limits []LimitRangeItem +} + +// +genclient=true + +// LimitRange sets resource usage limits for each kind of resource in a Namespace +type LimitRange struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the limits enforced + // +optional + Spec LimitRangeSpec +} + +// LimitRangeList is a list of LimitRange items. +type LimitRangeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of LimitRange objects + Items []LimitRange +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. 
(500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota +type ResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource + // +optional + Hard ResourceList + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. + // +optional + Scopes []ResourceQuotaScope +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource + // +optional + Hard ResourceList + // Used is the current observed total usage of the resource in the namespace + // +optional + Used ResourceList +} + +// +genclient=true + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired quota + // +optional + Spec ResourceQuotaSpec + + // Status defines the actual enforced quota and its current usage + // +optional + Status ResourceQuotaStatus +} + +// ResourceQuotaList is a list of ResourceQuota items +type ResourceQuotaList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of ResourceQuota objects + Items []ResourceQuota +} + +// +genclient=true + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. + // +optional + Data map[string][]byte + + // Used to facilitate programmatic handling of secret data. 
+ // +optional + Type SecretType +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default; arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authetication. 
+ // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. + // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secret. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" +) + +type SecretList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Secret +} + +// +genclient=true + +// ConfigMap holds configuration data for components or applications to consume. +type ConfigMap struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the configuration data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // +optional + Data map[string]string +} + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of ConfigMaps. + Items []ConfigMap +} + +// These constants are for remote command execution and port forwarding and are +// used by both the client side and server side components. +// +// This is probably not the ideal place for them, but it didn't seem worth it +// to create pkg/exec and pkg/portforward just to contain a single file with +// constants in it. Suggestions for more appropriate alternatives are +// definitely welcome! +const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParamm = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. 
+const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType + Status ConditionStatus + // +optional + Message string + // +optional + Error string +} + +// +genclient=true +// +nonNamespaced=true + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Conditions []ComponentCondition +} + +type ComponentStatusList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ComponentStatus +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *types.UnixUserID + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool + // The read-only root filesystem allows you to restrict the locations that an application can write + // files to, ensuring the persistent data can only be written to mounts. + // +optional + ReadOnlyRootFilesystem *bool +} + +// SELinuxOptions are the labels to be applied to the container. +type SELinuxOptions struct { + // SELinux user label + // +optional + User string + // SELinux role label + // +optional + Role string + // SELinux type label + // +optional + Type string + // SELinux level label. + // +optional + Level string +} + +// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record +// the global allocation state of the cluster. The schema of Range and Data generic, in that Range +// should be a string representation of the inputs to a range (for instance, for IP allocation it +// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a +// binary range. Consumers should use annotations to record additional information (schema version, +// data encoding hints). 
A range allocation should *ALWAYS* be recreatable at any time by observation +// of the cluster, thus the object is less strongly typed than most. +type RangeAllocation struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or + // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define + // a start and end unless there is an implicit end. + Range string + // A byte array representing the serialized state of a range allocation. Additional clarifiers on + // the type or format of data should be represented with annotations. For IP allocations, this is + // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing + // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). + Data []byte +} + +const ( + // "default-scheduler" is the name of default scheduler. + DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int = 1 +) diff --git a/vendor/k8s.io/client-go/pkg/api/v1/annotation_key_constants.go b/vendor/k8s.io/client-go/pkg/api/v1/annotation_key_constants.go new file mode 100644 index 000000000..bb4588554 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/annotation_key_constants.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file should be consistent with pkg/api/annotation_key_constants.go. + +package v1 + +const ( + // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy + // webhook backend fails. + ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" + + // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation + PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" + + // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods + MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" + + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. + TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" + + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. 
+ SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // CreatedByAnnotation represents the key used to store the spec(json) + // used to create the resource. + CreatedByAnnotation = "kubernetes.io/created-by" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by + // the kubelet. Pods with other sysctls will fail to launch. + SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" + + // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly + // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use + // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet + // will fail to launch. + UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" + + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. + // TODO: remove when alpha support for affinity is removed + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" + + // annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" + + kubectlPrefix = "kubectl.kubernetes.io/" + + // LastAppliedConfigAnnotation is the annotation used to store the previous + // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. + LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" + + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. + AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // AnnotationValueExternalTrafficLocal Value of annotation to specify local endpoints behavior. + AnnotationValueExternalTrafficLocal = "OnlyLocal" + // AnnotationValueExternalTrafficGlobal Value of annotation to specify global (legacy) behavior. 
+ AnnotationValueExternalTrafficGlobal = "Global" + + // TODO: The beta annotations have been deprecated, remove them when we release k8s 1.8. + + // BetaAnnotationHealthCheckNodePort Annotation specifying the healthcheck nodePort for the service. + // If not specified, annotation is created by the service api backend with the allocated nodePort. + // Will use user-specified nodePort value if specified by the client. + BetaAnnotationHealthCheckNodePort = "service.beta.kubernetes.io/healthcheck-nodeport" + + // BetaAnnotationExternalTraffic An annotation that denotes if this Service desires to route + // external traffic to local endpoints only. This preserves Source IP and avoids a second hop. + BetaAnnotationExternalTraffic = "service.beta.kubernetes.io/external-traffic" +) diff --git a/vendor/k8s.io/client-go/pkg/api/v1/conversion.go b/vendor/k8s.io/client-go/pkg/api/v1/conversion.go new file mode 100644 index 000000000..4ded76091 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/conversion.go @@ -0,0 +1,776 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/extensions" +) + +// This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are +// converted the most in the cluster. 
+// TODO: generate one of these for every external API group - this is to prove the impact +func addFastPathConversionFuncs(scheme *runtime.Scheme) error { + scheme.AddGenericConversionFunc(func(objA, objB interface{}, s conversion.Scope) (bool, error) { + switch a := objA.(type) { + case *Pod: + switch b := objB.(type) { + case *api.Pod: + return true, Convert_v1_Pod_To_api_Pod(a, b, s) + } + case *api.Pod: + switch b := objB.(type) { + case *Pod: + return true, Convert_api_Pod_To_v1_Pod(a, b, s) + } + + case *Event: + switch b := objB.(type) { + case *api.Event: + return true, Convert_v1_Event_To_api_Event(a, b, s) + } + case *api.Event: + switch b := objB.(type) { + case *Event: + return true, Convert_api_Event_To_v1_Event(a, b, s) + } + + case *ReplicationController: + switch b := objB.(type) { + case *api.ReplicationController: + return true, Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s) + } + case *api.ReplicationController: + switch b := objB.(type) { + case *ReplicationController: + return true, Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s) + } + + case *Node: + switch b := objB.(type) { + case *api.Node: + return true, Convert_v1_Node_To_api_Node(a, b, s) + } + case *api.Node: + switch b := objB.(type) { + case *Node: + return true, Convert_api_Node_To_v1_Node(a, b, s) + } + + case *Namespace: + switch b := objB.(type) { + case *api.Namespace: + return true, Convert_v1_Namespace_To_api_Namespace(a, b, s) + } + case *api.Namespace: + switch b := objB.(type) { + case *Namespace: + return true, Convert_api_Namespace_To_v1_Namespace(a, b, s) + } + + case *Service: + switch b := objB.(type) { + case *api.Service: + return true, Convert_v1_Service_To_api_Service(a, b, s) + } + case *api.Service: + switch b := objB.(type) { + case *Service: + return true, Convert_api_Service_To_v1_Service(a, b, s) + } + + case *Endpoints: + switch b := objB.(type) { + case *api.Endpoints: + return true, Convert_v1_Endpoints_To_api_Endpoints(a, b, s) + } + case *api.Endpoints: + switch b := objB.(type) { + case *Endpoints: + return true, Convert_api_Endpoints_To_v1_Endpoints(a, b, s) + } + + case *metav1.WatchEvent: + switch b := objB.(type) { + case *metav1.InternalEvent: + return true, metav1.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s) + } + case *metav1.InternalEvent: + switch b := objB.(type) { + case *metav1.WatchEvent: + return true, metav1.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s) + } + } + return false, nil + }) + return nil +} + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_api_Pod_To_v1_Pod, + Convert_api_PodSpec_To_v1_PodSpec, + Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_api_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_Pod_To_api_Pod, + Convert_v1_PodSpec_To_api_PodSpec, + Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, + Convert_v1_Secret_To_api_Secret, + Convert_v1_ServiceSpec_To_api_ServiceSpec, + Convert_v1_ResourceList_To_api_ResourceList, + Convert_v1_ReplicationController_to_extensions_ReplicaSet, + Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec, + Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus, + Convert_extensions_ReplicaSet_to_v1_ReplicationController, + Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec, + Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus, + ) + if 
err != nil { + return err + } + + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + for _, k := range []string{ + "Endpoints", + "ResourceQuota", + "PersistentVolumeClaim", + "Service", + "ServiceAccount", + "ConfigMap", + } { + kind := k // don't close over range variables + err = scheme.AddFieldLabelConversionFunc("v1", kind, + func(label, value string) (string, string, error) { + switch label { + case "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) + } + }, + ) + if err != nil { + return err + } + } + + // Add field conversion funcs. + err = scheme.AddFieldLabelConversionFunc("v1", "Pod", + func(label, value string) (string, string, error) { + switch label { + case "metadata.annotations", + "metadata.labels", + "metadata.name", + "metadata.namespace", + "spec.nodeName", + "spec.restartPolicy", + "spec.serviceAccountName", + "status.phase", + "status.hostIP", + "status.podIP": + return label, value, nil + // This is for backwards compatibility with old v1 clients which send spec.host + case "spec.host": + return "spec.nodeName", value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "Node", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + case "spec.unschedulable": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "ReplicationController", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", + "metadata.namespace", + "status.replicas": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "PersistentVolume", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + if err := AddFieldLabelConversionsForEvent(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForNamespace(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForSecret(scheme); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationController_to_extensions_ReplicaSet(in *ReplicationController, out *extensions.ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + out.Replicas = *in.Replicas + if in.Selector != nil { + metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s) + } + if in.Template != nil { + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, &out.Template, s); 
err != nil { + return err + } + } + return nil +} + +func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *ReplicationControllerStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +func Convert_extensions_ReplicaSet_to_v1_ReplicationController(in *extensions.ReplicaSet, out *ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + fieldErr, ok := err.(*field.Error) + if !ok { + return err + } + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + out.Annotations[NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String() + } + if err := Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *extensions.ReplicaSetSpec, out *ReplicationControllerSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = in.Replicas + out.MinReadySeconds = in.MinReadySeconds + var invalidErr error + if in.Selector != nil { + invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s) + } + out.Template = new(PodTemplateSpec) + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { + return err + } + return invalidErr +} + +func Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(in *extensions.ReplicaSetStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if in.Template != nil { + out.Template = new(PodTemplateSpec) + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if in.Template != nil { + out.Template = new(api.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { + if err := autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil { + return err + } + + if old := out.Annotations; old != nil { + out.Annotations = 
make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + } + if len(out.Status.InitContainerStatuses) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Status.InitContainerStatuses) + if err != nil { + return err + } + out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + return nil +} + +func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { + // TODO: sometime after we move init container to stable, remove these conversions + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. + if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta + } + // Move the annotation to the internal repr. field + if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { + var values []ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Status.InitContainerStatuses = values + } + + if err := autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + return nil +} + +func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { + return err + } + + // TODO: sometime after we move init container to stable, remove these conversions. + if old := out.Annotations; old != nil { + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + } + if len(out.Spec.InitContainers) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Spec.InitContainers) + if err != nil { + return err + } + out.Annotations[PodInitContainersAnnotationKey] = string(value) + out.Annotations[PodInitContainersBetaAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + } + return nil +} + +func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { + // TODO: sometime after we move init container to stable, remove these conversions + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. 
+ if valueBeta, okBeta := in.Annotations[PodInitContainersBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainersAnnotationKey] = valueBeta + } + // Move the annotation to the internal repr. field + if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { + var values []Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Spec.InitContainers = values + + // Call defaulters explicitly until annotations are removed + tmpPodTemp := &PodTemplate{ + Template: PodTemplateSpec{ + Spec: PodSpec{ + HostNetwork: in.Spec.HostNetwork, + InitContainers: values, + }, + }, + } + SetObjectDefaults_PodTemplate(tmpPodTemp) + in.Spec.InitContainers = tmpPodTemp.Template.Spec.InitContainers + } + + if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + } + return nil +} + +// The following two PodSpec conversions are done here to support ServiceAccount +// as an alias for ServiceAccountName. +func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { + if err := autoConvert_api_PodSpec_To_v1_PodSpec(in, out, s); err != nil { + return err + } + + // DeprecatedServiceAccount is an alias for ServiceAccountName. + out.DeprecatedServiceAccount = in.ServiceAccountName + + if in.SecurityContext != nil { + // the host namespace fields have to be handled here for backward compatibility + // with v1.0.0 + out.HostPID = in.SecurityContext.HostPID + out.HostNetwork = in.SecurityContext.HostNetwork + out.HostIPC = in.SecurityContext.HostIPC + } + + return nil +} + +func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodSpec_To_api_PodSpec(in, out, s); err != nil { + return err + } + + // We support DeprecatedServiceAccount as an alias for ServiceAccountName. + // If both are specified, ServiceAccountName (the new field) wins. 
+ if in.ServiceAccountName == "" { + out.ServiceAccountName = in.DeprecatedServiceAccount + } + + // the host namespace fields have to be handled specially for backward compatibility + // with v1.0.0 + if out.SecurityContext == nil { + out.SecurityContext = new(api.PodSecurityContext) + } + out.SecurityContext.HostNetwork = in.HostNetwork + out.SecurityContext.HostPID = in.HostPID + out.SecurityContext.HostIPC = in.HostIPC + + return nil +} + +func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { + if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil { + return err + } + + // TODO: sometime after we move init container to stable, remove these conversions + if len(out.Spec.InitContainers) > 0 || len(out.Status.InitContainerStatuses) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + if len(out.Spec.InitContainers) > 0 { + value, err := json.Marshal(out.Spec.InitContainers) + if err != nil { + return err + } + out.Annotations[PodInitContainersAnnotationKey] = string(value) + out.Annotations[PodInitContainersBetaAnnotationKey] = string(value) + } + if len(out.Status.InitContainerStatuses) > 0 { + value, err := json.Marshal(out.Status.InitContainerStatuses) + if err != nil { + return err + } + out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) + } + + return nil +} + +func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. + if valueBeta, okBeta := in.Annotations[PodInitContainersBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainersAnnotationKey] = valueBeta + } + // TODO: sometime after we move init container to stable, remove these conversions + // Move the annotation to the internal repr. field + if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { + var values []Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Spec.InitContainers = values + // Call defaulters explicitly until annotations are removed + tmpPod := &Pod{ + Spec: PodSpec{ + HostNetwork: in.Spec.HostNetwork, + InitContainers: values, + }, + } + SetObjectDefaults_Pod(tmpPod) + in.Spec.InitContainers = tmpPod.Spec.InitContainers + } + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. 
+ if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta + } + if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { + var values []ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Status.InitContainerStatuses = values + } + + if err := autoConvert_v1_Pod_To_api_Pod(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + return nil +} + +func Convert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { + if err := autoConvert_v1_Secret_To_api_Secret(in, out, s); err != nil { + return err + } + + // StringData overwrites Data + if len(in.StringData) > 0 { + if out.Data == nil { + out.Data = map[string][]byte{} + } + for k, v := range in.StringData { + out.Data[k] = []byte(v) + } + } + + return nil +} + +func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error { + out.SupplementalGroups = in.SupplementalGroups + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(SELinuxOptions) + if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup + return nil +} + +func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { + out.SupplementalGroups = in.SupplementalGroups + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(api.SELinuxOptions) + if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_ResourceList_To_api_ResourceList(in *ResourceList, out *api.ResourceList, s conversion.Scope) error { + if *in == nil { + return nil + } + if *out == nil { + *out = make(api.ResourceList, len(*in)) + } + for key, val := range *in { + // Moved to defaults + // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. + // In the future, we should instead reject values that need rounding. 
+ // const milliScale = -3 + // val.RoundUp(milliScale) + + (*out)[api.ResourceName(key)] = val + } + return nil +} + +func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Event", + func(label, value string) (string, string, error) { + switch label { + case "involvedObject.kind", + "involvedObject.namespace", + "involvedObject.name", + "involvedObject.uid", + "involvedObject.apiVersion", + "involvedObject.resourceVersion", + "involvedObject.fieldPath", + "reason", + "source", + "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +func AddFieldLabelConversionsForNamespace(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Namespace", + func(label, value string) (string, string, error) { + switch label { + case "status.phase", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Secret", + func(label, value string) (string, string, error) { + switch label { + case "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/defaults.go b/vendor/k8s.io/client-go/pkg/api/v1/defaults.go new file mode 100644 index 000000000..a791ba7b5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/defaults.go @@ -0,0 +1,373 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/util" + "k8s.io/client-go/pkg/util/parsers" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_ResourceList(obj *ResourceList) { + for key, val := range *obj { + // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. + // In the future, we should instead reject values that need rounding. 
+ const milliScale = -3 + val.RoundUp(milliScale) + + (*obj)[ResourceName(key)] = val + } +} + +func SetDefaults_PodExecOptions(obj *PodExecOptions) { + obj.Stdout = true + obj.Stderr = true +} +func SetDefaults_PodAttachOptions(obj *PodAttachOptions) { + obj.Stdout = true + obj.Stderr = true +} +func SetDefaults_ReplicationController(obj *ReplicationController) { + var labels map[string]string + if obj.Spec.Template != nil { + labels = obj.Spec.Template.Labels + } + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if len(obj.Spec.Selector) == 0 { + obj.Spec.Selector = labels + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} +func SetDefaults_Volume(obj *Volume) { + if util.AllPtrFieldsNil(&obj.VolumeSource) { + obj.VolumeSource = VolumeSource{ + EmptyDir: &EmptyDirVolumeSource{}, + } + } +} +func SetDefaults_ContainerPort(obj *ContainerPort) { + if obj.Protocol == "" { + obj.Protocol = ProtocolTCP + } +} +func SetDefaults_Container(obj *Container) { + if obj.ImagePullPolicy == "" { + // Ignore error and assume it has been validated elsewhere + _, tag, _, _ := parsers.ParseImageName(obj.Image) + + // Check image tag + if tag == "latest" { + obj.ImagePullPolicy = PullAlways + } else { + obj.ImagePullPolicy = PullIfNotPresent + } + } + if obj.TerminationMessagePath == "" { + obj.TerminationMessagePath = TerminationMessagePathDefault + } + if obj.TerminationMessagePolicy == "" { + obj.TerminationMessagePolicy = TerminationMessageReadFile + } +} +func SetDefaults_Service(obj *Service) { + if obj.Spec.SessionAffinity == "" { + obj.Spec.SessionAffinity = ServiceAffinityNone + } + if obj.Spec.Type == "" { + obj.Spec.Type = ServiceTypeClusterIP + } + for i := range obj.Spec.Ports { + sp := &obj.Spec.Ports[i] + if sp.Protocol == "" { + sp.Protocol = ProtocolTCP + } + if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") { + sp.TargetPort = intstr.FromInt(int(sp.Port)) + } + } + // Defaults ExternalTrafficPolicy field for NodePort / LoadBalancer service + // to Global for consistency. + if _, ok := obj.Annotations[BetaAnnotationExternalTraffic]; ok { + // Don't default this field if beta annotation exists. 
+ return + } else if (obj.Spec.Type == ServiceTypeNodePort || + obj.Spec.Type == ServiceTypeLoadBalancer) && + obj.Spec.ExternalTrafficPolicy == "" { + obj.Spec.ExternalTrafficPolicy = ServiceExternalTrafficPolicyTypeCluster + } +} +func SetDefaults_Pod(obj *Pod) { + // If limits are specified, but requests are not, default requests to limits + // This is done here rather than a more specific defaulting pass on ResourceRequirements + // because we only want this defaulting semantic to take place on a Pod and not a PodTemplate + for i := range obj.Spec.Containers { + // set requests to limits if requests are not specified, but limits are + if obj.Spec.Containers[i].Resources.Limits != nil { + if obj.Spec.Containers[i].Resources.Requests == nil { + obj.Spec.Containers[i].Resources.Requests = make(ResourceList) + } + for key, value := range obj.Spec.Containers[i].Resources.Limits { + if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists { + obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy()) + } + } + } + } + for i := range obj.Spec.InitContainers { + if obj.Spec.InitContainers[i].Resources.Limits != nil { + if obj.Spec.InitContainers[i].Resources.Requests == nil { + obj.Spec.InitContainers[i].Resources.Requests = make(ResourceList) + } + for key, value := range obj.Spec.InitContainers[i].Resources.Limits { + if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists { + obj.Spec.InitContainers[i].Resources.Requests[key] = *(value.Copy()) + } + } + } + } +} +func SetDefaults_PodSpec(obj *PodSpec) { + if obj.DNSPolicy == "" { + obj.DNSPolicy = DNSClusterFirst + } + if obj.RestartPolicy == "" { + obj.RestartPolicy = RestartPolicyAlways + } + if obj.HostNetwork { + defaultHostNetworkPorts(&obj.Containers) + defaultHostNetworkPorts(&obj.InitContainers) + } + if obj.SecurityContext == nil { + obj.SecurityContext = &PodSecurityContext{} + } + if obj.TerminationGracePeriodSeconds == nil { + period := int64(DefaultTerminationGracePeriodSeconds) + obj.TerminationGracePeriodSeconds = &period + } + if obj.SchedulerName == "" { + obj.SchedulerName = DefaultSchedulerName + } +} +func SetDefaults_Probe(obj *Probe) { + if obj.TimeoutSeconds == 0 { + obj.TimeoutSeconds = 1 + } + if obj.PeriodSeconds == 0 { + obj.PeriodSeconds = 10 + } + if obj.SuccessThreshold == 0 { + obj.SuccessThreshold = 1 + } + if obj.FailureThreshold == 0 { + obj.FailureThreshold = 3 + } +} +func SetDefaults_SecretVolumeSource(obj *SecretVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(SecretVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_ConfigMapVolumeSource(obj *ConfigMapVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(ConfigMapVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_DownwardAPIVolumeSource(obj *DownwardAPIVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(DownwardAPIVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_Secret(obj *Secret) { + if obj.Type == "" { + obj.Type = SecretTypeOpaque + } +} +func SetDefaults_ProjectedVolumeSource(obj *ProjectedVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(ProjectedVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_PersistentVolume(obj *PersistentVolume) { + if obj.Status.Phase == "" { + obj.Status.Phase = VolumePending + } + if obj.Spec.PersistentVolumeReclaimPolicy == "" { + obj.Spec.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimRetain + } +} +func 
SetDefaults_PersistentVolumeClaim(obj *PersistentVolumeClaim) { + if obj.Status.Phase == "" { + obj.Status.Phase = ClaimPending + } +} +func SetDefaults_ISCSIVolumeSource(obj *ISCSIVolumeSource) { + if obj.ISCSIInterface == "" { + obj.ISCSIInterface = "default" + } +} +func SetDefaults_AzureDiskVolumeSource(obj *AzureDiskVolumeSource) { + if obj.CachingMode == nil { + obj.CachingMode = new(AzureDataDiskCachingMode) + *obj.CachingMode = AzureDataDiskCachingReadWrite + } + if obj.Kind == nil { + obj.Kind = new(AzureDataDiskKind) + *obj.Kind = AzureSharedBlobDisk + } + if obj.FSType == nil { + obj.FSType = new(string) + *obj.FSType = "ext4" + } + if obj.ReadOnly == nil { + obj.ReadOnly = new(bool) + *obj.ReadOnly = false + } +} +func SetDefaults_Endpoints(obj *Endpoints) { + for i := range obj.Subsets { + ss := &obj.Subsets[i] + for i := range ss.Ports { + ep := &ss.Ports[i] + if ep.Protocol == "" { + ep.Protocol = ProtocolTCP + } + } + } +} +func SetDefaults_HTTPGetAction(obj *HTTPGetAction) { + if obj.Path == "" { + obj.Path = "/" + } + if obj.Scheme == "" { + obj.Scheme = URISchemeHTTP + } +} +func SetDefaults_NamespaceStatus(obj *NamespaceStatus) { + if obj.Phase == "" { + obj.Phase = NamespaceActive + } +} +func SetDefaults_Node(obj *Node) { + if obj.Spec.ExternalID == "" { + obj.Spec.ExternalID = obj.Name + } +} +func SetDefaults_NodeStatus(obj *NodeStatus) { + if obj.Allocatable == nil && obj.Capacity != nil { + obj.Allocatable = make(ResourceList, len(obj.Capacity)) + for key, value := range obj.Capacity { + obj.Allocatable[key] = *(value.Copy()) + } + obj.Allocatable = obj.Capacity + } +} +func SetDefaults_ObjectFieldSelector(obj *ObjectFieldSelector) { + if obj.APIVersion == "" { + obj.APIVersion = "v1" + } +} +func SetDefaults_LimitRangeItem(obj *LimitRangeItem) { + // for container limits, we apply default values + if obj.Type == LimitTypeContainer { + + if obj.Default == nil { + obj.Default = make(ResourceList) + } + if obj.DefaultRequest == nil { + obj.DefaultRequest = make(ResourceList) + } + + // If a default limit is unspecified, but the max is specified, default the limit to the max + for key, value := range obj.Max { + if _, exists := obj.Default[key]; !exists { + obj.Default[key] = *(value.Copy()) + } + } + // If a default limit is specified, but the default request is not, default request to limit + for key, value := range obj.Default { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) + } + } + // If a default request is not specified, but the min is provided, default request to the min + for key, value := range obj.Min { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) + } + } + } +} +func SetDefaults_ConfigMap(obj *ConfigMap) { + if obj.Data == nil { + obj.Data = make(map[string]string) + } +} + +// With host networking default all container ports to host ports. 
+func defaultHostNetworkPorts(containers *[]Container) { + for i := range *containers { + for j := range (*containers)[i].Ports { + if (*containers)[i].Ports[j].HostPort == 0 { + (*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort + } + } + } +} + +func SetDefaults_RBDVolumeSource(obj *RBDVolumeSource) { + if obj.RBDPool == "" { + obj.RBDPool = "rbd" + } + if obj.RadosUser == "" { + obj.RadosUser = "admin" + } + if obj.Keyring == "" { + obj.Keyring = "/etc/ceph/keyring" + } +} + +func SetDefaults_ScaleIOVolumeSource(obj *ScaleIOVolumeSource) { + if obj.ProtectionDomain == "" { + obj.ProtectionDomain = "default" + } + if obj.StoragePool == "" { + obj.StoragePool = "default" + } + if obj.StorageMode == "" { + obj.StorageMode = "ThinProvisioned" + } + if obj.FSType == "" { + obj.FSType = "xfs" + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/doc.go b/vendor/k8s.io/client-go/pkg/api/v1/doc.go new file mode 100644 index 000000000..0fdd87f75 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generate.go b/vendor/k8s.io/client-go/pkg/api/v1/generate.go new file mode 100644 index 000000000..b8c44e4c7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/generate.go @@ -0,0 +1,64 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + utilrand "k8s.io/apimachinery/pkg/util/rand" +) + +// NameGenerator generates names for objects. Some backends may have more information +// available to guide selection of new names and this interface hides those details. +type NameGenerator interface { + // GenerateName generates a valid name from the base name, adding a random suffix to the + // the base. If base is valid, the returned name must also be valid. The generator is + // responsible for knowing the maximum valid name length. + GenerateName(base string) string +} + +// GenerateName will resolve the object name of the provided ObjectMeta to a generated version if +// necessary. It expects that validation for ObjectMeta has already completed (that Base is a +// valid name) and that the NameGenerator generates a name that is also valid. 
+func GenerateName(u NameGenerator, meta *ObjectMeta) { + if len(meta.GenerateName) == 0 || len(meta.Name) != 0 { + return + } + meta.Name = u.GenerateName(meta.GenerateName) +} + +// simpleNameGenerator generates random names. +type simpleNameGenerator struct{} + +// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics +// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes +// name (63 characters) +var SimpleNameGenerator NameGenerator = simpleNameGenerator{} + +const ( + // TODO: make this flexible for non-core resources with alternate naming rules. + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +func (simpleNameGenerator) GenerateName(base string) string { + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go new file mode 100644 index 000000000..13804ea9b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go @@ -0,0 +1,45112 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/api/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/pkg/api/v1/generated.proto + + It has these top-level messages: + AWSElasticBlockStoreVolumeSource + Affinity + AttachedVolume + AvoidPods + AzureDiskVolumeSource + AzureFileVolumeSource + Binding + Capabilities + CephFSVolumeSource + CinderVolumeSource + ComponentCondition + ComponentStatus + ComponentStatusList + ConfigMap + ConfigMapEnvSource + ConfigMapKeySelector + ConfigMapList + ConfigMapProjection + ConfigMapVolumeSource + Container + ContainerImage + ContainerPort + ContainerState + ContainerStateRunning + ContainerStateTerminated + ContainerStateWaiting + ContainerStatus + DaemonEndpoint + DeleteOptions + DownwardAPIProjection + DownwardAPIVolumeFile + DownwardAPIVolumeSource + EmptyDirVolumeSource + EndpointAddress + EndpointPort + EndpointSubset + Endpoints + EndpointsList + EnvFromSource + EnvVar + EnvVarSource + Event + EventList + EventSource + ExecAction + FCVolumeSource + FlexVolumeSource + FlockerVolumeSource + GCEPersistentDiskVolumeSource + GitRepoVolumeSource + GlusterfsVolumeSource + HTTPGetAction + HTTPHeader + Handler + HostAlias + HostPathVolumeSource + ISCSIVolumeSource + KeyToPath + Lifecycle + LimitRange + LimitRangeItem + LimitRangeList + LimitRangeSpec + List + ListOptions + LoadBalancerIngress + LoadBalancerStatus + LocalObjectReference + LocalVolumeSource + NFSVolumeSource + Namespace + NamespaceList + NamespaceSpec + NamespaceStatus + Node + NodeAddress + NodeAffinity + NodeCondition + NodeDaemonEndpoints + NodeList + NodeProxyOptions + NodeResources + NodeSelector + NodeSelectorRequirement + NodeSelectorTerm + NodeSpec + NodeStatus + NodeSystemInfo + ObjectFieldSelector + ObjectMeta + ObjectReference + PersistentVolume + PersistentVolumeClaim + PersistentVolumeClaimList + PersistentVolumeClaimSpec + PersistentVolumeClaimStatus + PersistentVolumeClaimVolumeSource + PersistentVolumeList + PersistentVolumeSource + PersistentVolumeSpec + PersistentVolumeStatus + PhotonPersistentDiskVolumeSource + Pod + PodAffinity + PodAffinityTerm + PodAntiAffinity + PodAttachOptions + PodCondition + PodExecOptions + PodList + PodLogOptions + PodPortForwardOptions + PodProxyOptions + PodSecurityContext + PodSignature + PodSpec + PodStatus + PodStatusResult + PodTemplate + PodTemplateList + PodTemplateSpec + PortworxVolumeSource + Preconditions + PreferAvoidPodsEntry + PreferredSchedulingTerm + Probe + ProjectedVolumeSource + QuobyteVolumeSource + RBDVolumeSource + RangeAllocation + ReplicationController + ReplicationControllerCondition + ReplicationControllerList + ReplicationControllerSpec + ReplicationControllerStatus + ResourceFieldSelector + ResourceQuota + ResourceQuotaList + ResourceQuotaSpec + ResourceQuotaStatus + ResourceRequirements + SELinuxOptions + ScaleIOVolumeSource + Secret + SecretEnvSource + SecretKeySelector + SecretList + SecretProjection + SecretVolumeSource + SecurityContext + SerializedReference + Service + ServiceAccount + ServiceAccountList + ServiceList + ServicePort + ServiceProxyOptions + ServiceSpec + ServiceStatus + StorageOSPersistentVolumeSource + StorageOSVolumeSource + Sysctl + TCPSocketAction + Taint + Toleration + Volume + VolumeMount + VolumeProjection + VolumeSource + VsphereVirtualDiskVolumeSource + WeightedPodAffinityTerm +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } +func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} +func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *Affinity) Reset() { *m = Affinity{} } +func (*Affinity) ProtoMessage() {} +func (*Affinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } +func (*AttachedVolume) ProtoMessage() {} +func (*AttachedVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *AvoidPods) Reset() { *m = AvoidPods{} } +func (*AvoidPods) ProtoMessage() {} +func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } +func (*AzureDiskVolumeSource) ProtoMessage() {} +func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } +func (*AzureFileVolumeSource) ProtoMessage() {} +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *Binding) Reset() { *m = Binding{} } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *Capabilities) Reset() { *m = Capabilities{} } +func (*Capabilities) ProtoMessage() {} +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } +func (*CephFSVolumeSource) ProtoMessage() {} +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } +func (*CinderVolumeSource) ProtoMessage() {} +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } +func (*ComponentCondition) ProtoMessage() {} +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } +func (*ComponentStatus) ProtoMessage() {} +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } +func (*ComponentStatusList) ProtoMessage() {} +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} 
} + +func (m *ConfigMap) Reset() { *m = ConfigMap{} } +func (*ConfigMap) ProtoMessage() {} +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } +func (*ConfigMapEnvSource) ProtoMessage() {} +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } +func (*ConfigMapKeySelector) ProtoMessage() {} +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } +func (*ConfigMapList) ProtoMessage() {} +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } +func (*ConfigMapProjection) ProtoMessage() {} +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } +func (*ConfigMapVolumeSource) ProtoMessage() {} +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *ContainerImage) Reset() { *m = ContainerImage{} } +func (*ContainerImage) ProtoMessage() {} +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *ContainerPort) Reset() { *m = ContainerPort{} } +func (*ContainerPort) ProtoMessage() {} +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } +func (*ContainerStateRunning) ProtoMessage() {} +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } +func (*ContainerStateTerminated) ProtoMessage() {} +func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{24} +} + +func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } +func (*ContainerStateWaiting) ProtoMessage() {} +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } +func (*DaemonEndpoint) ProtoMessage() {} +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } +func (*DownwardAPIProjection) ProtoMessage() {} +func (*DownwardAPIProjection) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } + +func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } +func (*DownwardAPIVolumeFile) ProtoMessage() {} +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } + +func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } +func (*DownwardAPIVolumeSource) ProtoMessage() {} +func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{31} +} + +func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } +func (*EmptyDirVolumeSource) ProtoMessage() {} +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } + +func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } +func (*EndpointAddress) ProtoMessage() {} +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } + +func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } +func (*EndpointSubset) ProtoMessage() {} +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } + +func (m *Endpoints) Reset() { *m = Endpoints{} } +func (*Endpoints) ProtoMessage() {} +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } + +func (m *EndpointsList) Reset() { *m = EndpointsList{} } +func (*EndpointsList) ProtoMessage() {} +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } + +func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } +func (*EnvFromSource) ProtoMessage() {} +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } + +func (m *EnvVar) Reset() { *m = EnvVar{} } +func (*EnvVar) ProtoMessage() {} +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } + +func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } +func (*EnvVarSource) ProtoMessage() {} +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } + +func (m *EventSource) Reset() { *m = EventSource{} } +func (*EventSource) ProtoMessage() {} +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } + +func (m *ExecAction) Reset() { *m = ExecAction{} } +func (*ExecAction) ProtoMessage() {} +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } + +func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } +func (*FCVolumeSource) ProtoMessage() {} +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } + +func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } +func (*FlexVolumeSource) ProtoMessage() {} +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } + +func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } +func 
(*FlockerVolumeSource) ProtoMessage() {} +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } + +func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } +func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} +func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{48} +} + +func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } +func (*GitRepoVolumeSource) ProtoMessage() {} +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } + +func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } +func (*GlusterfsVolumeSource) ProtoMessage() {} +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } + +func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } +func (*HTTPGetAction) ProtoMessage() {} +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } + +func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } +func (*HTTPHeader) ProtoMessage() {} +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } + +func (m *Handler) Reset() { *m = Handler{} } +func (*Handler) ProtoMessage() {} +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } + +func (m *HostAlias) Reset() { *m = HostAlias{} } +func (*HostAlias) ProtoMessage() {} +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } + +func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } +func (*HostPathVolumeSource) ProtoMessage() {} +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } + +func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } +func (*ISCSIVolumeSource) ProtoMessage() {} +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } + +func (m *KeyToPath) Reset() { *m = KeyToPath{} } +func (*KeyToPath) ProtoMessage() {} +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } + +func (m *Lifecycle) Reset() { *m = Lifecycle{} } +func (*Lifecycle) ProtoMessage() {} +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } + +func (m *LimitRange) Reset() { *m = LimitRange{} } +func (*LimitRange) ProtoMessage() {} +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } + +func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } +func (*LimitRangeItem) ProtoMessage() {} +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } + +func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } +func (*LimitRangeList) ProtoMessage() {} +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } + +func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } +func (*LimitRangeSpec) ProtoMessage() {} +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{64} } + +func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } +func (*LoadBalancerIngress) ProtoMessage() {} +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } + +func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } +func (*LoadBalancerStatus) ProtoMessage() {} +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } + +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (*LocalObjectReference) ProtoMessage() {} +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } + +func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } +func (*LocalVolumeSource) ProtoMessage() {} +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } + +func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } +func (*NFSVolumeSource) ProtoMessage() {} +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } + +func (m *Namespace) Reset() { *m = Namespace{} } +func (*Namespace) ProtoMessage() {} +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } + +func (m *NamespaceList) Reset() { *m = NamespaceList{} } +func (*NamespaceList) ProtoMessage() {} +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } + +func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } +func (*NamespaceSpec) ProtoMessage() {} +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } + +func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } +func (*NamespaceStatus) ProtoMessage() {} +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } + +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } + +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (*NodeAddress) ProtoMessage() {} +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } + +func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } +func (*NodeAffinity) ProtoMessage() {} +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } + +func (m *NodeCondition) Reset() { *m = NodeCondition{} } +func (*NodeCondition) ProtoMessage() {} +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } + +func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } +func (*NodeDaemonEndpoints) ProtoMessage() {} +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } + +func (m *NodeList) Reset() { *m = NodeList{} } +func (*NodeList) ProtoMessage() {} +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } + +func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } +func (*NodeProxyOptions) ProtoMessage() {} +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } + +func (m *NodeResources) Reset() { *m = NodeResources{} } +func (*NodeResources) ProtoMessage() {} +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } + +func (m *NodeSelector) Reset() { *m = NodeSelector{} } +func (*NodeSelector) ProtoMessage() {} +func 
(*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } + +func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } +func (*NodeSelectorRequirement) ProtoMessage() {} +func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{83} +} + +func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } +func (*NodeSelectorTerm) ProtoMessage() {} +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } + +func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } +func (*NodeSystemInfo) ProtoMessage() {} +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } + +func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } +func (*ObjectFieldSelector) ProtoMessage() {} +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } + +func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } +func (*PersistentVolume) ProtoMessage() {} +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } + +func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } +func (*PersistentVolumeClaim) ProtoMessage() {} +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } + +func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } +func (*PersistentVolumeClaimList) ProtoMessage() {} +func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{93} +} + +func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } +func (*PersistentVolumeClaimSpec) ProtoMessage() {} +func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{94} +} + +func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } +func (*PersistentVolumeClaimStatus) ProtoMessage() {} +func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{95} +} + +func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } +func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} +func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{96} +} + +func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } +func (*PersistentVolumeList) ProtoMessage() {} +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } + +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (*PersistentVolumeSource) 
ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } + +func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } +func (*PersistentVolumeSpec) ProtoMessage() {} +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } + +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{100} +} + +func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } +func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} +func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{101} +} + +func (m *Pod) Reset() { *m = Pod{} } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } + +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (*PodAffinity) ProtoMessage() {} +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{103} } + +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (*PodAffinityTerm) ProtoMessage() {} +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{104} } + +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (*PodAntiAffinity) ProtoMessage() {} +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } + +func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } +func (*PodAttachOptions) ProtoMessage() {} +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } + +func (m *PodCondition) Reset() { *m = PodCondition{} } +func (*PodCondition) ProtoMessage() {} +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } + +func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } +func (*PodExecOptions) ProtoMessage() {} +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } + +func (m *PodList) Reset() { *m = PodList{} } +func (*PodList) ProtoMessage() {} +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } + +func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } +func (*PodLogOptions) ProtoMessage() {} +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } + +func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } +func (*PodPortForwardOptions) ProtoMessage() {} +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } + +func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } +func (*PodProxyOptions) ProtoMessage() {} +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } + +func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } +func (*PodSecurityContext) ProtoMessage() {} +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } + +func (m *PodSignature) Reset() { *m = PodSignature{} } +func (*PodSignature) ProtoMessage() {} +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } + +func (m *PodSpec) Reset() { *m = PodSpec{} } +func (*PodSpec) 
ProtoMessage() {} +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } + +func (m *PodStatus) Reset() { *m = PodStatus{} } +func (*PodStatus) ProtoMessage() {} +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } + +func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } +func (*PodStatusResult) ProtoMessage() {} +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } + +func (m *PodTemplate) Reset() { *m = PodTemplate{} } +func (*PodTemplate) ProtoMessage() {} +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } + +func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } +func (*PodTemplateList) ProtoMessage() {} +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } + +func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } +func (*PodTemplateSpec) ProtoMessage() {} +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } + +func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } +func (*PortworxVolumeSource) ProtoMessage() {} +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } + +func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } +func (*PreferAvoidPodsEntry) ProtoMessage() {} +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } + +func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } +func (*PreferredSchedulingTerm) ProtoMessage() {} +func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{124} +} + +func (m *Probe) Reset() { *m = Probe{} } +func (*Probe) ProtoMessage() {} +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } + +func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } +func (*ProjectedVolumeSource) ProtoMessage() {} +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } + +func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } +func (*QuobyteVolumeSource) ProtoMessage() {} +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } + +func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } +func (*RBDVolumeSource) ProtoMessage() {} +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } + +func (m *ReplicationController) Reset() { *m = ReplicationController{} } +func (*ReplicationController) ProtoMessage() {} +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } + +func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } +func (*ReplicationControllerCondition) ProtoMessage() {} +func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{131} +} + 
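(Aside, not part of the vendored file.) The generated methods in this hunk follow the usual gogo/protobuf layout: each API type gets Reset/ProtoMessage/Descriptor stubs, an init() further down registers the type under its "k8s.io.client-go.pkg.api.v1.*" name, and the MarshalTo methods that follow init() emit the protobuf wire format by hand — a tag byte of (field number << 3) | wire type, a varint length prefix for length-delimited fields, then the raw bytes. A minimal, self-contained sketch of that layout, assuming only the standard protobuf wire rules; the helper names below are invented for illustration and only the byte layout matches the generated code:

package main

import "fmt"

// encodeVarint appends v in protobuf varint form: 7 payload bits per byte,
// continuation bit set on every byte except the last. This mirrors the inline
// loops (l >= 1<<7 ...) visible in the generated MarshalTo methods.
func encodeVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// appendStringField writes a string as a length-delimited field:
// tag byte, varint length, then the bytes. Wire type 2 = length-delimited,
// so field 1 yields tag 0x0a and field 2 yields 0x12, the constants seen
// throughout the generated marshalers.
func appendStringField(buf []byte, fieldNumber int, s string) []byte {
	buf = append(buf, byte(fieldNumber<<3|2))
	buf = encodeVarint(buf, uint64(len(s)))
	return append(buf, s...)
}

func main() {
	// Two string fields numbered 1 and 2 — the same shape the generated
	// AttachedVolume marshaler uses for Name and DevicePath.
	var buf []byte
	buf = appendStringField(buf, 1, "vol")
	buf = appendStringField(buf, 2, "/dev/sdb")
	fmt.Printf("% x\n", buf) // 0a 03 76 6f 6c 12 08 2f 64 65 76 2f 73 64 62
}

Running the sketch prints 0a 03 76 6f 6c 12 08 2f 64 65 76 2f 73 64 62: tag 0x0a, length 3, "vol", then tag 0x12, length 8, "/dev/sdb" — the same byte layout the hand-rolled MarshalTo methods in this file produce for their length-delimited fields.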
+func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } +func (*ReplicationControllerList) ProtoMessage() {} +func (*ReplicationControllerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{132} +} + +func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } +func (*ReplicationControllerSpec) ProtoMessage() {} +func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{133} +} + +func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } +func (*ReplicationControllerStatus) ProtoMessage() {} +func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{134} +} + +func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } +func (*ResourceFieldSelector) ProtoMessage() {} +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } + +func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } +func (*ResourceQuota) ProtoMessage() {} +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } + +func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } +func (*ResourceQuotaList) ProtoMessage() {} +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } + +func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } +func (*ResourceQuotaSpec) ProtoMessage() {} +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } + +func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } +func (*ResourceQuotaStatus) ProtoMessage() {} +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } + +func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } +func (*SELinuxOptions) ProtoMessage() {} +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } + +func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } +func (*ScaleIOVolumeSource) ProtoMessage() {} +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } + +func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } +func (*SecretEnvSource) ProtoMessage() {} +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } + +func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } +func (*SecretKeySelector) ProtoMessage() {} +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } + +func (m *SecretProjection) Reset() { *m = SecretProjection{} } +func (*SecretProjection) ProtoMessage() {} +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } + +func (m *SecretVolumeSource) Reset() { *m = 
SecretVolumeSource{} } +func (*SecretVolumeSource) ProtoMessage() {} +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } + +func (m *SecurityContext) Reset() { *m = SecurityContext{} } +func (*SecurityContext) ProtoMessage() {} +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } + +func (m *SerializedReference) Reset() { *m = SerializedReference{} } +func (*SerializedReference) ProtoMessage() {} +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } + +func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } +func (*ServiceAccountList) ProtoMessage() {} +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } + +func (m *ServiceList) Reset() { *m = ServiceList{} } +func (*ServiceList) ProtoMessage() {} +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } + +func (m *ServicePort) Reset() { *m = ServicePort{} } +func (*ServicePort) ProtoMessage() {} +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } + +func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } +func (*ServiceProxyOptions) ProtoMessage() {} +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } + +func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } +func (*ServiceStatus) ProtoMessage() {} +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } + +func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } +func (*StorageOSPersistentVolumeSource) ProtoMessage() {} +func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{159} +} + +func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } +func (*StorageOSVolumeSource) ProtoMessage() {} +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } + +func (m *Sysctl) Reset() { *m = Sysctl{} } +func (*Sysctl) ProtoMessage() {} +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } + +func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } +func (*TCPSocketAction) ProtoMessage() {} +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } + +func (m *Taint) Reset() { *m = Taint{} } +func (*Taint) ProtoMessage() {} +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } + +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } + +func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } +func (*VolumeProjection) ProtoMessage() {} +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } + +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (*VolumeSource) ProtoMessage() {} +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } + +func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } +func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} +func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{169} +} + +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (*WeightedPodAffinityTerm) ProtoMessage() {} +func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{170} +} + +func init() { + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.client-go.pkg.api.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.client-go.pkg.api.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.client-go.pkg.api.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.client-go.pkg.api.v1.Binding") + proto.RegisterType((*Capabilities)(nil), "k8s.io.client-go.pkg.api.v1.Capabilities") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.CephFSVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.CinderVolumeSource") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.client-go.pkg.api.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.client-go.pkg.api.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.client-go.pkg.api.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMap") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapList") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.client-go.pkg.api.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.client-go.pkg.api.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.client-go.pkg.api.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.client-go.pkg.api.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), 
"k8s.io.client-go.pkg.api.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.client-go.pkg.api.v1.DaemonEndpoint") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.client-go.pkg.api.v1.DeleteOptions") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.client-go.pkg.api.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.client-go.pkg.api.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.client-go.pkg.api.v1.EndpointSubset") + proto.RegisterType((*Endpoints)(nil), "k8s.io.client-go.pkg.api.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.client-go.pkg.api.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.client-go.pkg.api.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.client-go.pkg.api.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.client-go.pkg.api.v1.EnvVarSource") + proto.RegisterType((*Event)(nil), "k8s.io.client-go.pkg.api.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.client-go.pkg.api.v1.EventList") + proto.RegisterType((*EventSource)(nil), "k8s.io.client-go.pkg.api.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.client-go.pkg.api.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FCVolumeSource") + proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FlexVolumeSource") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.client-go.pkg.api.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.client-go.pkg.api.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.client-go.pkg.api.v1.Handler") + proto.RegisterType((*HostAlias)(nil), "k8s.io.client-go.pkg.api.v1.HostAlias") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.client-go.pkg.api.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.client-go.pkg.api.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.client-go.pkg.api.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeItem") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeSpec") + 
proto.RegisterType((*List)(nil), "k8s.io.client-go.pkg.api.v1.List") + proto.RegisterType((*ListOptions)(nil), "k8s.io.client-go.pkg.api.v1.ListOptions") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.client-go.pkg.api.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.client-go.pkg.api.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.client-go.pkg.api.v1.LocalObjectReference") + proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.LocalVolumeSource") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.client-go.pkg.api.v1.Namespace") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.client-go.pkg.api.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.client-go.pkg.api.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.client-go.pkg.api.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), "k8s.io.client-go.pkg.api.v1.NodeCondition") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.client-go.pkg.api.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.client-go.pkg.api.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.client-go.pkg.api.v1.NodeResources") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.client-go.pkg.api.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.client-go.pkg.api.v1.NodeStatus") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.client-go.pkg.api.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.client-go.pkg.api.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectMeta)(nil), "k8s.io.client-go.pkg.api.v1.ObjectMeta") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.client-go.pkg.api.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimStatus") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeSpec") + proto.RegisterType((*PersistentVolumeStatus)(nil), 
"k8s.io.client-go.pkg.api.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.client-go.pkg.api.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.client-go.pkg.api.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.client-go.pkg.api.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.client-go.pkg.api.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.client-go.pkg.api.v1.PodCondition") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodExecOptions") + proto.RegisterType((*PodList)(nil), "k8s.io.client-go.pkg.api.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodProxyOptions") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.client-go.pkg.api.v1.PodSecurityContext") + proto.RegisterType((*PodSignature)(nil), "k8s.io.client-go.pkg.api.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.client-go.pkg.api.v1.PodSpec") + proto.RegisterType((*PodStatus)(nil), "k8s.io.client-go.pkg.api.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.client-go.pkg.api.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.client-go.pkg.api.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.client-go.pkg.api.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.client-go.pkg.api.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.client-go.pkg.api.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.client-go.pkg.api.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerSpec") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.client-go.pkg.api.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), 
"k8s.io.client-go.pkg.api.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaSpec") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaStatus") + proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.client-go.pkg.api.v1.ResourceRequirements") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.client-go.pkg.api.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ScaleIOVolumeSource") + proto.RegisterType((*Secret)(nil), "k8s.io.client-go.pkg.api.v1.Secret") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.client-go.pkg.api.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.client-go.pkg.api.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.client-go.pkg.api.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.client-go.pkg.api.v1.SecretProjection") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.client-go.pkg.api.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), "k8s.io.client-go.pkg.api.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.client-go.pkg.api.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.client-go.pkg.api.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.client-go.pkg.api.v1.ServiceAccountList") + proto.RegisterType((*ServiceList)(nil), "k8s.io.client-go.pkg.api.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.client-go.pkg.api.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.client-go.pkg.api.v1.ServiceSpec") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.client-go.pkg.api.v1.ServiceStatus") + proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.StorageOSPersistentVolumeSource") + proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.StorageOSVolumeSource") + proto.RegisterType((*Sysctl)(nil), "k8s.io.client-go.pkg.api.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.client-go.pkg.api.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.client-go.pkg.api.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.client-go.pkg.api.v1.Toleration") + proto.RegisterType((*Volume)(nil), "k8s.io.client-go.pkg.api.v1.Volume") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.client-go.pkg.api.v1.VolumeMount") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.client-go.pkg.api.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.client-go.pkg.api.v1.WeightedPodAffinityTerm") +} +func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + 
i += copy(dAtA[i:], m.VolumeID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *Affinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NodeAffinity != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) + n1, err := m.NodeAffinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.PodAffinity != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinity.Size())) + n2, err := m.PodAffinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.PodAntiAffinity != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodAntiAffinity.Size())) + n3, err := m.PodAntiAffinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i += copy(dAtA[i:], m.DevicePath) + return i, nil +} + +func (m *AvoidPods) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, msg := range m.PreferAvoidPods { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) + i += copy(dAtA[i:], m.DiskName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) + i += copy(dAtA[i:], m.DataDiskURI) + if m.CachingMode != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) + i += copy(dAtA[i:], *m.CachingMode) + } + if m.FSType != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i += copy(dAtA[i:], *m.FSType) + } + if m.ReadOnly != nil { + dAtA[i] = 0x28 + i++ + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Kind != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) 
+ i += copy(dAtA[i:], *m.Kind) + } + return i, nil +} + +func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i += copy(dAtA[i:], m.ShareName) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *Binding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Binding) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n5, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *Capabilities) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Drop) > 0 { + for _, s := range m.Drop { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i += copy(dAtA[i:], m.SecretFile) + if m.SecretRef != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n6, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + dAtA[i] = 0x30 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i += copy(dAtA[i:], m.VolumeID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + return i, nil +} + +func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConfigMap) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for _, k := range 
keysForData { + dAtA[i] = 0x12 + i++ + v := m.Data[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n10, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + if m.Optional != nil { + dAtA[i] = 0x10 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n11, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + if m.Optional != nil { + dAtA[i] = 0x18 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n12, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n13, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Optional != nil { + dAtA[i] = 0x20 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapVolumeSource) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n14, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + } + if m.Optional != nil { + dAtA[i] = 0x20 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Container) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Container) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i += copy(dAtA[i:], m.WorkingDir) + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Env) > 0 { + for _, msg := range m.Env { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) + n15, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + if len(m.VolumeMounts) > 0 { + for _, msg := range m.VolumeMounts { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.LivenessProbe != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size())) + n16, err := m.LivenessProbe.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.ReadinessProbe != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) + n17, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.Lifecycle != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) + n18, err := 
m.Lifecycle.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i += copy(dAtA[i:], m.TerminationMessagePath) + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i += copy(dAtA[i:], m.ImagePullPolicy) + if m.SecurityContext != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) + n19, err := m.SecurityContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + if m.StdinOnce { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x1 + i++ + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.EnvFrom) > 0 { + for _, msg := range m.EnvFrom { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i += copy(dAtA[i:], m.TerminationMessagePolicy) + return i, nil +} + +func (m *ContainerImage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) + return i, nil +} + +func (m *ContainerPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i += copy(dAtA[i:], m.Protocol) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i += copy(dAtA[i:], m.HostIP) + return i, nil +} + +func (m *ContainerState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Waiting != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size())) + n20, err := m.Waiting.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.Running != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) + n21, err := m.Running.MarshalTo(dAtA[i:]) + if err != nil { + return 
0, err + } + i += n21 + } + if m.Terminated != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) + n22, err := m.Terminated.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} + +func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) + n23, err := m.StartedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) + n24, err := m.StartedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) + n25, err := m.FinishedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + return i, nil +} + +func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size())) + n26, err := m.State.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTerminationState.Size())) + n27, err := m.LastTerminationState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + dAtA[i] = 0x20 + i++ + if m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, 
i, uint64(m.RestartCount)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i += copy(dAtA[i:], m.ImageID) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + return i, nil +} + +func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + return i, nil +} + +func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GracePeriodSeconds != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) + n28, err := m.Preconditions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.OrphanDependents != nil { + dAtA[i] = 0x18 + i++ + if *m.OrphanDependents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.PropagationPolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i += copy(dAtA[i:], *m.PropagationPolicy) + } + return i, nil +} + +func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.FieldRef != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) + n29, err := m.FieldRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + if m.ResourceFieldRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) + n30, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.Mode != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + } + return i, nil +} + +func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + } + return i, nil +} + +func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) + i += copy(dAtA[i:], m.Medium) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) + n31, err := m.SizeLimit.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + return i, nil +} + +func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i += copy(dAtA[i:], m.IP) + if m.TargetRef != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) + n32, err := m.TargetRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + if m.NodeName != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i += copy(dAtA[i:], *m.NodeName) + } + return i, nil +} + +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i += copy(dAtA[i:], m.Protocol) + return i, nil +} + +func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NotReadyAddresses) > 0 { + for _, msg := range m.NotReadyAddresses { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Ports) > 0 { + for _, 
msg := range m.Ports { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Endpoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + if len(m.Subsets) > 0 { + for _, msg := range m.Subsets { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EndpointsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n34, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) + i += copy(dAtA[i:], m.Prefix) + if m.ConfigMapRef != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) + n35, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n36, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + return i, nil +} + +func (m *EnvVar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + if m.ValueFrom != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) + n37, err := m.ValueFrom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} + +func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvVarSource) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FieldRef != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) + n38, err := m.FieldRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + if m.ResourceFieldRef != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) + n39, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if m.ConfigMapKeyRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) + n40, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + if m.SecretKeyRef != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) + n41, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n42, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) + n43, err := m.InvolvedObject.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n44, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) + n45, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) + n46, err := m.LastTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + return i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n47, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) + i += copy(dAtA[i:], m.Component) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + return i, nil +} + +func (m *ExecAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Lun != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n48, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for _, k := range keysForOptions { + dAtA[i] = 0x2a + i++ + v := m.Options[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) + i += copy(dAtA[i:], m.DatasetName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) + i += copy(dAtA[i:], m.DatasetUUID) + return i, nil +} + +func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) + i += copy(dAtA[i:], m.PDName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) + i += copy(dAtA[i:], m.Repository) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) + i += copy(dAtA[i:], m.Revision) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) + i += copy(dAtA[i:], m.Directory) + return i, nil +} + +func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i += copy(dAtA[i:], m.EndpointsName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) + n49, err := m.Port.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) + i += copy(dAtA[i:], m.Scheme) + if len(m.HTTPHeaders) > 0 { + for _, msg := range m.HTTPHeaders { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *Handler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Handler) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Exec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) + n50, err := m.Exec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + if m.HTTPGet != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) + n51, err := m.HTTPGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + } + if m.TCPSocket != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) + n52, err := m.TCPSocket.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + } + return i, nil +} + +func (m *HostAlias) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i += copy(dAtA[i:], m.IP) + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i += copy(dAtA[i:], m.TargetPortal) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i += copy(dAtA[i:], m.IQN) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i += copy(dAtA[i:], m.ISCSIInterface) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += 
copy(dAtA[i:], m.FSType) + dAtA[i] = 0x30 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Portals) > 0 { + for _, s := range m.Portals { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x40 + i++ + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n53, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + } + dAtA[i] = 0x58 + i++ + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *KeyToPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.Mode != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + } + return i, nil +} + +func (m *Lifecycle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PostStart != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) + n54, err := m.PostStart.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + } + if m.PreStop != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) + n55, err := m.PreStop.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 + } + return i, nil +} + +func (m *LimitRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n56, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n57, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 + return i, nil +} + +func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if len(m.Max) > 0 { + keysForMax := make([]string, 0, len(m.Max)) + for k := range m.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + for _, k := range keysForMax { + dAtA[i] = 0x12 + i++ + v := m.Max[ResourceName(k)] + 
msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n58, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + } + if len(m.Min) > 0 { + keysForMin := make([]string, 0, len(m.Min)) + for k := range m.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + for _, k := range keysForMin { + dAtA[i] = 0x1a + i++ + v := m.Min[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n59, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + } + } + if len(m.Default) > 0 { + keysForDefault := make([]string, 0, len(m.Default)) + for k := range m.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + for _, k := range keysForDefault { + dAtA[i] = 0x22 + i++ + v := m.Default[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n60, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n60 + } + } + if len(m.DefaultRequest) > 0 { + keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) + for k := range m.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + for _, k := range keysForDefaultRequest { + dAtA[i] = 0x2a + i++ + v := m.DefaultRequest[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n61, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n61 + } + } + if len(m.MaxLimitRequestRatio) > 0 { + keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) + for k := range m.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + for _, k := range keysForMaxLimitRequestRatio { + dAtA[i] = 0x32 + i++ + v := m.MaxLimitRequestRatio[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + 
mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n62, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n62 + } + } + return i, nil +} + +func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n63, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n63 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + for _, msg := range m.Limits { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *List) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *List) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n64, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n64 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i += copy(dAtA[i:], m.LabelSelector) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i += copy(dAtA[i:], m.FieldSelector) + dAtA[i] = 0x18 + i++ + if m.Watch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + } + dAtA[i] = 0x30 + i++ + if m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i += copy(dAtA[i:], m.IP) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + return i, nil +} + +func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) + i += copy(dAtA[i:], m.Server) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *Namespace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n65, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n65 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n66, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n66 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n67, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n67 + return i, nil +} + +func (m *NamespaceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n68, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n68 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + return i, nil +} + +func (m *Node) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n69, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n69 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n70, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n70 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n71, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n71 + return i, nil +} + +func (m *NodeAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) + i += copy(dAtA[i:], m.Address) + return i, nil +} + +func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) + n72, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n72 + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) + n73, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n73 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n74, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n74 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) + n75, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n75 + return i, nil +} + +func (m *NodeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n76, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n76 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *NodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0xa + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n77, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n77 + } + } + return i, nil +} + +func (m *NodeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, msg := range m.NodeSelectorTerms { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) + i += copy(dAtA[i:], m.PodCIDR) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalID))) + i += copy(dAtA[i:], m.ExternalID) 
+ dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) + i += copy(dAtA[i:], m.ProviderID) + dAtA[i] = 0x20 + i++ + if m.Unschedulable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Taints) > 0 { + for _, msg := range m.Taints { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0xa + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n78, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n78 + } + } + if len(m.Allocatable) > 0 { + keysForAllocatable := make([]string, 0, len(m.Allocatable)) + for k := range m.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + for _, k := range keysForAllocatable { + dAtA[i] = 0x12 + i++ + v := m.Allocatable[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n79, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n79 + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) + n80, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n80 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) + n81, err := m.NodeInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n81 + if len(m.Images) > 0 { + for _, msg := range m.Images { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + dAtA[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.VolumesAttached) > 0 { + for _, msg := range m.VolumesAttached { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) + i += copy(dAtA[i:], m.MachineID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) + i += copy(dAtA[i:], m.SystemUUID) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) + i += copy(dAtA[i:], m.BootID) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) + i += copy(dAtA[i:], m.KernelVersion) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) + i += copy(dAtA[i:], m.OSImage) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) + i += copy(dAtA[i:], m.ContainerRuntimeVersion) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) + i += copy(dAtA[i:], m.KubeletVersion) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) + i += copy(dAtA[i:], m.KubeProxyVersion) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) + i += copy(dAtA[i:], m.OperatingSystem) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) + i += copy(dAtA[i:], m.Architecture) + return i, nil +} + +func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i += copy(dAtA[i:], m.FieldPath) + return i, nil +} + +func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i += copy(dAtA[i:], m.GenerateName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.SelfLink))) + i += copy(dAtA[i:], m.SelfLink) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) + n82, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n82 + if m.DeletionTimestamp != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) + n83, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n83 + } + if m.DeletionGracePeriodSeconds != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for _, k := range keysForLabels { + dAtA[i] = 0x5a + i++ + v := m.Labels[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for _, k := range keysForAnnotations { + dAtA[i] = 0x62 + i++ + v := m.Annotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.OwnerReferences) > 0 { + for _, msg := range m.OwnerReferences { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + dAtA[i] = 0x72 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i += copy(dAtA[i:], m.ClusterName) + if m.Initializers != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) + n84, err := m.Initializers.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n84 + } + return i, nil +} + +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + 
i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i += copy(dAtA[i:], m.FieldPath) + return i, nil +} + +func (m *PersistentVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n85, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n85 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n86, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n86 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n87, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n87 + return i, nil +} + +func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n88, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n88 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n89, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n89 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n90, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n90 + return i, nil +} + +func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n91, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n91 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) + n92, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n92 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + if m.Selector != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n93, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n93 + } + if m.StorageClassName != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + i += copy(dAtA[i:], *m.StorageClassName) + } + return i, nil +} + +func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0x1a + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n94, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n94 + } + } + return i, nil +} + +func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) + i += copy(dAtA[i:], m.ClaimName) + dAtA[i] = 0x10 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n95, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n95 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GCEPersistentDisk != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) + n96, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n96 + } + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n97, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n97 + } + if m.HostPath != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) + n98, err := m.HostPath.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n98 + } + if m.Glusterfs != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n99, err := m.Glusterfs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n99 + } + if m.NFS != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n100, err := m.NFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n100 + } + if m.RBD != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n101, err := m.RBD.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n101 + } + if m.ISCSI != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n102, err := m.ISCSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n102 + } + if m.Cinder != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n103, err := m.Cinder.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n103 + } + if m.CephFS != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n104, err := m.CephFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n104 + } + if m.FC != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) + n105, err := m.FC.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n105 + } + if m.Flocker != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n106, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n106 + } + if m.FlexVolume != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n107, err := m.FlexVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n107 + } + if m.AzureFile != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) + n108, err := m.AzureFile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n108 + } + if m.VsphereVolume != nil { + dAtA[i] = 0x72 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) + n109, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n109 + } + if m.Quobyte != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) + n110, err := m.Quobyte.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n110 + } + if m.AzureDisk != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) + n111, err := m.AzureDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n111 + } + if m.PhotonPersistentDisk != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) + n112, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n112 + } + if m.PortworxVolume != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) + n113, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n113 + } + if m.ScaleIO != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) + n114, err := m.ScaleIO.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n114 + } + if m.Local != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) + n115, err := m.Local.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n115 + } + if m.StorageOS != nil { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) + n116, err := m.StorageOS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n116 + } + return i, nil +} + +func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0xa + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n117, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n117 + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) + n118, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n118 + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.ClaimRef != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.ClaimRef.Size())) + n119, err := m.ClaimRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n119 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) + i += copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) + i += copy(dAtA[i:], m.StorageClassName) + return i, nil +} + +func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + return i, nil +} + +func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) + i += copy(dAtA[i:], m.PdID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + return i, nil +} + +func (m *Pod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pod) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n120, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n120 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n121, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n121 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n122, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n122 + return i, nil +} + +func (m *PodAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodAffinityTerm) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LabelSelector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) + n123, err := m.LabelSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n123 + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i += copy(dAtA[i:], m.TopologyKey) + return i, nil +} + +func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x10 + i++ + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x18 + i++ + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x20 + i++ + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + return i, nil +} + +func (m *PodCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) + n124, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n124 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n125, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { 
+ return 0, err + } + i += n125 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x10 + i++ + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x18 + i++ + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x20 + i++ + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *PodList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n126, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n126 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + dAtA[i] = 0x10 + i++ + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x18 + i++ + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SinceSeconds != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) + n127, err := m.SinceTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n127 + } + dAtA[i] = 0x30 + i++ + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.TailLines != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + } + return i, nil +} + +func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, num := range m.Ports { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(num)) + } + } + return i, nil +} + +func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SELinuxOptions != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) + n128, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n128 + } + if m.RunAsUser != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + dAtA[i] = 0x18 + i++ + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.SupplementalGroups) > 0 { + for _, num := range m.SupplementalGroups { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(num)) + } + } + if m.FSGroup != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) + } + return i, nil +} + +func (m *PodSignature) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PodController != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) + n129, err := m.PodController.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n129 + } + return i, nil +} + +func (m *PodSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Containers) > 0 { + for _, msg := range m.Containers { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i += copy(dAtA[i:], m.RestartPolicy) + if m.TerminationGracePeriodSeconds != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + } + dAtA[i] = 0x32 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) + i += copy(dAtA[i:], m.DNSPolicy) + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for _, k := range keysForNodeSelector { + dAtA[i] = 0x3a + i++ + v := m.NodeSelector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i += copy(dAtA[i:], m.ServiceAccountName) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) + i += copy(dAtA[i:], m.DeprecatedServiceAccount) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + dAtA[i] = 0x58 + i++ + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x60 + i++ + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x68 + i++ + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecurityContext != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) + n130, err := m.SecurityContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n130 + } + if len(m.ImagePullSecrets) > 0 { + for _, msg := range m.ImagePullSecrets { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i += copy(dAtA[i:], m.Subdomain) + if m.Affinity != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) + n131, err := m.Affinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n131 + } + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i += copy(dAtA[i:], m.SchedulerName) + if len(m.InitContainers) > 0 { + for _, msg := range m.InitContainers { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AutomountServiceAccountToken != nil { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x1 + i++ + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Tolerations) > 0 { + for _, msg := range m.Tolerations { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.HostAliases) > 0 { + for _, msg := range m.HostAliases { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i += copy(dAtA[i:], m.HostIP) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) + i += copy(dAtA[i:], m.PodIP) + if m.StartTime != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) + n132, err := m.StartTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n132 + } + if len(m.ContainerStatuses) > 0 { + for _, msg := range m.ContainerStatuses { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) + i += copy(dAtA[i:], m.QOSClass) + if len(m.InitContainerStatuses) > 0 { + for _, msg := range m.InitContainerStatuses { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodStatusResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n133, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n133 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n134, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n134 + return i, nil +} + +func (m *PodTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n135, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n135 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n136, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n136 + return i, nil +} + +func (m *PodTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() 
+ dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n137, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n137 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n138, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n138 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n139, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n139 + return i, nil +} + +func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i += copy(dAtA[i:], m.VolumeID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *Preconditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UID != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i += copy(dAtA[i:], *m.UID) + } + return i, nil +} + +func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) + n140, err := m.PodSignature.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n140 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) + n141, err := m.EvictionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n141 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) + n142, err := m.Preference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n142 + return i, nil +} + +func (m *Probe) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Probe) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) + n143, err := m.Handler.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n143 + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) + return i, nil +} + +func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Sources) > 0 { + for _, msg := range m.Sources { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + } + return i, nil +} + +func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) + i += copy(dAtA[i:], m.Registry) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume))) + i += copy(dAtA[i:], m.Volume) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + return i, nil +} + +func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ 
+ i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i += copy(dAtA[i:], m.RBDImage) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i += copy(dAtA[i:], m.RBDPool) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i += copy(dAtA[i:], m.RadosUser) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i += copy(dAtA[i:], m.Keyring) + if m.SecretRef != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n144, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n144 + } + dAtA[i] = 0x40 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n145, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n145 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) + i += copy(dAtA[i:], m.Range) + if m.Data != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *ReplicationController) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n146, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n146 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n147, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n147 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n148, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n148 + return i, nil +} + +func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n149, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n149 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n150, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n150 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for _, k := range keysForSelector { + dAtA[i] = 0x12 + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.Template != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n151, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n151 + } + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i += copy(dAtA[i:], m.ContainerName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) + n152, err := m.Divisor.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n152 + return i, nil +} + +func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n153, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n153 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n154, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n154 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n155, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n155 + return i, nil +} + +func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n156, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n156 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for _, k := range keysForHard { + dAtA[i] = 0xa + i++ + v := m.Hard[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n157, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n157 + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m 
*ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for _, k := range keysForHard { + dAtA[i] = 0xa + i++ + v := m.Hard[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n158, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n158 + } + } + if len(m.Used) > 0 { + keysForUsed := make([]string, 0, len(m.Used)) + for k := range m.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + for _, k := range keysForUsed { + dAtA[i] = 0x12 + i++ + v := m.Used[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n159, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n159 + } + } + return i, nil +} + +func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + keysForLimits := make([]string, 0, len(m.Limits)) + for k := range m.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + for _, k := range keysForLimits { + dAtA[i] = 0xa + i++ + v := m.Limits[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n160, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n160 + } + } + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for _, k := range keysForRequests { + dAtA[i] = 0x12 + i++ + v := m.Requests[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + 
sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n161, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n161 + } + } + return i, nil +} + +func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i += copy(dAtA[i:], m.Level) + return i, nil +} + +func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i += copy(dAtA[i:], m.Gateway) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i += copy(dAtA[i:], m.System) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n162, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n162 + } + dAtA[i] = 0x20 + i++ + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i += copy(dAtA[i:], m.ProtectionDomain) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i += copy(dAtA[i:], m.StoragePool) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i += copy(dAtA[i:], m.StorageMode) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x50 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n163, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n163 + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for _, k := range keysForData { + dAtA[i] = 0x12 + i++ + v := m.Data[string(k)] + byteSize := 0 + if 
v != nil { + byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if len(m.StringData) > 0 { + keysForStringData := make([]string, 0, len(m.StringData)) + for k := range m.StringData { + keysForStringData = append(keysForStringData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + for _, k := range keysForStringData { + dAtA[i] = 0x22 + i++ + v := m.StringData[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n164, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n164 + if m.Optional != nil { + dAtA[i] = 0x10 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n165, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n165 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + if m.Optional != nil { + dAtA[i] = 0x18 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n166, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n166 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SecretProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n167, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n167 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Optional != nil { + dAtA[i] = 0x20 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + } + if m.Optional != nil { + dAtA[i] = 0x20 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Capabilities != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) + n168, err := m.Capabilities.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n168 + } + if m.Privileged != nil { + dAtA[i] = 0x10 + i++ + if *m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.SELinuxOptions != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) + n169, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n169 + } + if m.RunAsUser != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + dAtA[i] = 0x28 + i++ + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ReadOnlyRootFilesystem != nil { + dAtA[i] = 0x30 + i++ + if *m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SerializedReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) + n170, err := m.Reference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n170 + return i, nil +} + +func (m *Service) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n171, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n171 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n172, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n172 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n173, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n173 + return i, nil +} + +func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n174, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n174 + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.ImagePullSecrets) > 0 { + for _, msg := range m.ImagePullSecrets { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AutomountServiceAccountToken != nil { + dAtA[i] = 0x20 + i++ + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n175, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n175 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ServiceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n176, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n176 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ServicePort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i += copy(dAtA[i:], m.Protocol) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) + n177, err := m.TargetPort.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n177 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) + return i, nil +} + +func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for _, k := range keysForSelector { + dAtA[i] = 0x12 + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) + i += copy(dAtA[i:], m.ClusterIP) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) + i += copy(dAtA[i:], m.SessionAffinity) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) + i += copy(dAtA[i:], m.LoadBalancerIP) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + dAtA[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ExternalName))) + i += copy(dAtA[i:], m.ExternalName) + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) + i += copy(dAtA[i:], m.ExternalTrafficPolicy) + dAtA[i] = 0x60 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) + return i, nil +} + +func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) + n178, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n178 + return i, nil +} + +func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i += copy(dAtA[i:], m.VolumeNamespace) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n179, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n179 + } + return i, nil +} + +func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i += copy(dAtA[i:], m.VolumeNamespace) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n180, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n180 + } + return i, nil +} + +func (m *Sysctl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) + n181, err := m.Port.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n181 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + return i, nil +} + +func (m *Taint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Taint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i += copy(dAtA[i:], m.Effect) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) + n182, err := m.TimeAdded.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n182 + return i, nil +} + +func (m *Toleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i += copy(dAtA[i:], m.Effect) + if m.TolerationSeconds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + } + return i, nil +} + +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) + n183, err := m.VolumeSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n183 + return i, nil +} + +func (m *VolumeMount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i += copy(dAtA[i:], m.MountPath) + dAtA[i] = 0x22 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) + i += copy(dAtA[i:], m.SubPath) + return i, nil +} + +func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) + n184, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n184 + } + if m.DownwardAPI != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) + n185, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n185 + } + if m.ConfigMap != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) + n186, err := m.ConfigMap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n186 + } + return i, nil +} + +func (m *VolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HostPath != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) + n187, err := m.HostPath.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n187 + } + if m.EmptyDir != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) + n188, err := m.EmptyDir.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n188 + } + if m.GCEPersistentDisk != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) + n189, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n189 + } + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n190, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n190 + } + if m.GitRepo != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) + n191, err := m.GitRepo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n191 + } + if m.Secret != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) + n192, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n192 + } + if m.NFS != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n193, err := m.NFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n193 + } + if m.ISCSI != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n194, err := m.ISCSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n194 + } + if m.Glusterfs != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n195, err := m.Glusterfs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n195 + } + if m.PersistentVolumeClaim != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) + n196, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n196 + } + if m.RBD != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n197, err := m.RBD.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n197 + } + if m.FlexVolume != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n198, err := m.FlexVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n198 + } + if m.Cinder != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n199, err := m.Cinder.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n199 + } + if m.CephFS != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n200, err := m.CephFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n200 + } + if m.Flocker != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n201, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n201 + } + if m.DownwardAPI != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) + n202, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n202 + } + if m.FC != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) + n203, err := m.FC.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n203 + } + if m.AzureFile != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) + n204, err := m.AzureFile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n204 + } + if m.ConfigMap != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) + n205, err := m.ConfigMap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n205 + } + if m.VsphereVolume != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) + n206, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n206 + } + if m.Quobyte != nil { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) + n207, err := m.Quobyte.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n207 + } + if m.AzureDisk != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) + n208, err := m.AzureDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n208 + } + if m.PhotonPersistentDisk != nil { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) + n209, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n209 + } + if m.PortworxVolume != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) + n210, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n210 + } + if m.ScaleIO != nil { + dAtA[i] = 0xca + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) + n211, err := m.ScaleIO.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n211 + } + if m.Projected != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) + n212, err := m.Projected.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n212 + } + if m.StorageOS != nil { + dAtA[i] = 0xda + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) + n213, err := m.StorageOS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n213 + } + return i, nil +} + +func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) + i += copy(dAtA[i:], m.VolumePath) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) + i += copy(dAtA[i:], m.StoragePolicyName) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) + i += copy(dAtA[i:], m.StoragePolicyID) + return i, nil +} + +func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) + n214, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n214 + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n +} + +func (m *Affinity) Size() (n int) { + var l int + _ = l + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AttachedVolume) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m 
*AvoidPods) Size() (n int) { + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, e := range m.PreferAvoidPods { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AzureDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.DiskName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DataDiskURI) + n += 1 + l + sovGenerated(uint64(l)) + if m.CachingMode != nil { + l = len(*m.CachingMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.Kind != nil { + l = len(*m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AzureFileVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Binding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Capabilities) Size() (n int) { + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Drop) > 0 { + for _, s := range m.Drop { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CephFSVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *CinderVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ComponentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ComponentStatus) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ComponentStatusList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigMap) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ConfigMapEnvSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} 
+ +func (m *ConfigMapKeySelector) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigMapProjection) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapVolumeSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *Container) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 3 + n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerImage) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.SizeBytes)) + return n +} + +func (m *ContainerPort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostPort)) + n += 1 + sovGenerated(uint64(m.ContainerPort)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerState) Size() (n int) { + var l int + _ = l + if 
m.Waiting != nil { + l = m.Waiting.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Running != nil { + l = m.Running.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Terminated != nil { + l = m.Terminated.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ContainerStateRunning) Size() (n int) { + var l int + _ = l + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStateTerminated) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ExitCode)) + n += 1 + sovGenerated(uint64(m.Signal)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStateWaiting) Size() (n int) { + var l int + _ = l + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.State.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTerminationState.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 1 + sovGenerated(uint64(m.RestartCount)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonEndpoint) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + return n +} + +func (m *DeleteOptions) Size() (n int) { + var l int + _ = l + if m.GracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + l = m.Preconditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OrphanDependents != nil { + n += 2 + } + if m.PropagationPolicy != nil { + l = len(*m.PropagationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DownwardAPIProjection) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DownwardAPIVolumeFile) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } + return n +} + +func (m *DownwardAPIVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *EmptyDirVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Medium) + n += 1 + l + sovGenerated(uint64(l)) + l = m.SizeLimit.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointAddress) Size() (n int) { + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Hostname) + n += 1 + l + 
sovGenerated(uint64(l)) + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointPort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointSubset) Size() (n int) { + var l int + _ = l + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NotReadyAddresses) > 0 { + for _, e := range m.NotReadyAddresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Endpoints) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subsets) > 0 { + for _, e := range m.Subsets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EndpointsList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EnvFromSource) Size() (n int) { + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMapRef != nil { + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EnvVar) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + if m.ValueFrom != nil { + l = m.ValueFrom.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EnvVarSource) Size() (n int) { + var l int + _ = l + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMapKeyRef != nil { + l = m.ConfigMapKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretKeyRef != nil { + l = m.SecretKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Event) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.InvolvedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EventSource) Size() (n int) { + var l int + _ = l + l = len(m.Component) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExecAction) Size() (n int) { + var l int + 
_ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FCVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Lun != nil { + n += 1 + sovGenerated(uint64(*m.Lun)) + } + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *FlexVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *FlockerVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.DatasetName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DatasetUUID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GCEPersistentDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.PDName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n +} + +func (m *GitRepoVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Repository) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Revision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Directory) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GlusterfsVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *HTTPGetAction) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scheme) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.HTTPHeaders) > 0 { + for _, e := range m.HTTPHeaders { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HTTPHeader) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Handler) Size() (n int) { + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HostAlias) Size() (n int) { + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostPathVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ISCSIVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + 
sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *KeyToPath) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } + return n +} + +func (m *Lifecycle) Size() (n int) { + var l int + _ = l + if m.PostStart != nil { + l = m.PostStart.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreStop != nil { + l = m.PreStop.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitRange) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitRangeItem) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Max) > 0 { + for k, v := range m.Max { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Min) > 0 { + for k, v := range m.Min { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Default) > 0 { + for k, v := range m.Default { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.DefaultRequest) > 0 { + for k, v := range m.DefaultRequest { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MaxLimitRequestRatio) > 0 { + for k, v := range m.MaxLimitRequestRatio { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *LimitRangeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LimitRangeSpec) Size() (n int) { + var l int + _ = l + if len(m.Limits) > 0 { + for _, e := range m.Limits { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *List) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListOptions) Size() (n int) { + var l int + _ = l + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + n += 1 + 
sovGenerated(uint64(*m.TimeoutSeconds)) + } + n += 2 + return n +} + +func (m *LoadBalancerIngress) Size() (n int) { + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LoadBalancerStatus) Size() (n int) { + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LocalVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NFSVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Server) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Namespace) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamespaceSpec) Size() (n int) { + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamespaceStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Node) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeAddress) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Address) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeAffinity) Size() (n int) { + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastHeartbeatTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeDaemonEndpoints) Size() (n int) { + var l int + _ = l + l = m.KubeletEndpoint.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } 
+ return n +} + +func (m *NodeProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeResources) Size() (n int) { + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeSelector) Size() (n int) { + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, e := range m.NodeSelectorTerms { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSelectorTerm) Size() (n int) { + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSpec) Size() (n int) { + var l int + _ = l + l = len(m.PodCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeStatus) Size() (n int) { + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DaemonEndpoints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.NodeInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumesAttached) > 0 { + for _, e := range m.VolumesAttached { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSystemInfo) Size() (n int) { + var l int + _ = l + l = len(m.MachineID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SystemUUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.BootID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KernelVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OSImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerRuntimeVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletVersion) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.KubeProxyVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OperatingSystem) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectFieldSelector) Size() (n int) { + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMeta) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GenerateName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + l = m.CreationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeletionTimestamp != nil { + l = m.DeletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DeletionGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.OwnerReferences) > 0 { + for _, e := range m.OwnerReferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ClusterName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Initializers != nil { + l = m.Initializers.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolume) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaim) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaimList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeClaimSpec) Size() (n int) { + var l int + _ = l + if 
len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeClaimStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.ClaimName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *PersistentVolumeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeSource) Size() (n int) { + var l int + _ = l + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Local != nil { + l = m.Local.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeSpec) Size() (n int) { + var l int + _ = 
l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.PersistentVolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClaimRef != nil { + l = m.ClaimRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.PersistentVolumeReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.PdID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Pod) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAffinity) Size() (n int) { + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAffinityTerm) Size() (n int) { + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAntiAffinity) Size() (n int) { + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAttachOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodExecOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodLogOptions) Size() (n int) { + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + return n +} + +func (m *PodPortForwardOptions) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + +func (m *PodProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityContext) Size() (n int) { + var l int + _ = l + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + if m.FSGroup != nil { + n += 1 + sovGenerated(uint64(*m.FSGroup)) + } + return n +} + +func (m *PodSignature) Size() (n int) { + var l int + _ = l + if m.PodController != nil { + l = m.PodController.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSpec) Size() (n int) { + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + l = len(m.SchedulerName) + n += 2 + l + 
sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 3 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ContainerStatuses) > 0 { + for _, e := range m.ContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.QOSClass) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainerStatuses) > 0 { + for _, e := range m.InitContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatusResult) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplate) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplateList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodTemplateSpec) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PortworxVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Preconditions) Size() (n int) { + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PreferAvoidPodsEntry) Size() (n int) { + var l int + _ = l + l = m.PodSignature.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EvictionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PreferredSchedulingTerm) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.Preference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Probe) Size() (n int) { + var l int + _ = l + l = m.Handler.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) + n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + n += 1 + sovGenerated(uint64(m.SuccessThreshold)) + n += 1 + 
sovGenerated(uint64(m.FailureThreshold)) + return n +} + +func (m *ProjectedVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *QuobyteVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Volume) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RBDVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RangeAllocation) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicationController) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicationControllerSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicationControllerStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m 
*ResourceFieldSelector) Size() (n int) { + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuota) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuotaList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaSpec) Size() (n int) { + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaStatus) Size() (n int) { + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Used) > 0 { + for k, v := range m.Used { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceRequirements) Size() (n int) { + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SELinuxOptions) Size() (n int) { + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleIOVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Secret) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + l = 0 + if v != nil { + 
l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + for k, v := range m.StringData { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SecretEnvSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretKeySelector) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretProjection) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecurityContext) Size() (n int) { + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Privileged != nil { + n += 2 + } + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if m.ReadOnlyRootFilesystem != nil { + n += 2 + } + return n +} + +func (m *SerializedReference) Size() (n int) { + var l int + _ = l + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Service) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccount) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 2 + } + return n +} + +func (m *ServiceAccountList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceList) Size() (n 
int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServicePort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NodePort)) + return n +} + +func (m *ServiceProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceSpec) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SessionAffinity) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ExternalName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) + return n +} + +func (m *ServiceStatus) Size() (n int) { + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageOSPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Sysctl) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TCPSocketAction) Size() (n int) { + var l int + _ = l + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Taint) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Toleration) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n 
+= 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } + return n +} + +func (m *Volume) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.VolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeMount) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeProjection) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeSource) Size() (n int) { + var l int + _ = l + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitRepo != nil { + l = m.GitRepo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + 
sovGenerated(uint64(l)) + } + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WeightedPodAffinityTerm) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AWSElasticBlockStoreVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Affinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Affinity{`, + `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "NodeAffinity", "NodeAffinity", 1) + `,`, + `PodAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAffinity), "PodAffinity", "PodAffinity", 1) + `,`, + `PodAntiAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAntiAffinity), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AttachedVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachedVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *AvoidPods) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AvoidPods{`, + `PreferAvoidPods:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferAvoidPods), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AzureDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureDiskVolumeSource{`, + `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, + `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, + `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `Kind:` + valueToStringGenerated(this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFileVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFileVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Binding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Binding{`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Capabilities) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Capabilities{`, + `Add:` + fmt.Sprintf("%v", this.Add) + `,`, + `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CinderVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentStatus{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatusList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentStatusList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMap) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string]string{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + s := strings.Join([]string{`&ConfigMap{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapEnvSource) String() string { + if this == 
nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapVolumeSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *Container) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Container{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "EnvVar", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + `,`, + `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), 
"Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerImage{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerState{`, + `Waiting:` + strings.Replace(fmt.Sprintf("%v", this.Waiting), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, + `Running:` + strings.Replace(fmt.Sprintf("%v", this.Running), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, + `Terminated:` + strings.Replace(fmt.Sprintf("%v", this.Terminated), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateRunning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateRunning{`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateTerminated) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateTerminated{`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateWaiting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateWaiting{`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + 
`State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, + `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonEndpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonEndpoint{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteOptions{`, + `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, + `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, + `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIProjection{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeFile{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeSource{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *EmptyDirVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EmptyDirVolumeSource{`, + `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, + `SizeLimit:` + strings.Replace(strings.Replace(this.SizeLimit.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointAddress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "ObjectReference", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + 
`Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSubset) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointSubset{`, + `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, + `NotReadyAddresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NotReadyAddresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Endpoints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subsets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subsets), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointsList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointsList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Endpoints", "Endpoints", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvFromSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvFromSource{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretEnvSource", "SecretEnvSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVar{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ValueFrom:` + strings.Replace(fmt.Sprintf("%v", this.ValueFrom), "EnvVarSource", "EnvVarSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVarSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVarSource{`, + `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, + `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), 
"ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, + `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSource{`, + `Component:` + fmt.Sprintf("%v", this.Component) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *ExecAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecAction{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *FCVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FCVolumeSource{`, + `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, + `Lun:` + valueToStringGenerated(this.Lun) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *FlexVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlockerVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlockerVolumeSource{`, + `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, + `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, + `}`, + }, "") + return s +} +func (this *GCEPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, + `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", 
this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *GitRepoVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRepoVolumeSource{`, + `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPGetAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPGetAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, + `HTTPHeaders:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPHeaders), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Handler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Handler{`, + `Exec:` + strings.Replace(fmt.Sprintf("%v", this.Exec), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(fmt.Sprintf("%v", this.HTTPGet), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(fmt.Sprintf("%v", this.TCPSocket), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostAlias) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostAlias{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, + `}`, + }, "") + return s +} +func (this *HostPathVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPathVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `}`, + }, "") + return s +} +func (this *KeyToPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyToPath{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + 
`Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *Lifecycle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lifecycle{`, + `PostStart:` + strings.Replace(fmt.Sprintf("%v", this.PostStart), "Handler", "Handler", 1) + `,`, + `PreStop:` + strings.Replace(fmt.Sprintf("%v", this.PreStop), "Handler", "Handler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRange{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeItem) String() string { + if this == nil { + return "nil" + } + keysForMax := make([]string, 0, len(this.Max)) + for k := range this.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + mapStringForMax := "ResourceList{" + for _, k := range keysForMax { + mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + } + mapStringForMax += "}" + keysForMin := make([]string, 0, len(this.Min)) + for k := range this.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + mapStringForMin := "ResourceList{" + for _, k := range keysForMin { + mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + } + mapStringForMin += "}" + keysForDefault := make([]string, 0, len(this.Default)) + for k := range this.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + mapStringForDefault := "ResourceList{" + for _, k := range keysForDefault { + mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + } + mapStringForDefault += "}" + keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) + for k := range this.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + mapStringForDefaultRequest := "ResourceList{" + for _, k := range keysForDefaultRequest { + mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + } + mapStringForDefaultRequest += "}" + keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) + for k := range this.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + mapStringForMaxLimitRequestRatio := "ResourceList{" + for _, k := range keysForMaxLimitRequestRatio { + mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) + } + mapStringForMaxLimitRequestRatio += "}" + s := strings.Join([]string{`&LimitRangeItem{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Max:` + mapStringForMax + `,`, + `Min:` + mapStringForMin + `,`, + `Default:` + mapStringForDefault + `,`, + `DefaultRequest:` + mapStringForDefaultRequest + `,`, + `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeList) String() string { + if this == 
nil { + return "nil" + } + s := strings.Join([]string{`&LimitRangeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "LimitRange", "LimitRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRangeSpec{`, + `Limits:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Limits), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListOptions{`, + `LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`, + `FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`, + `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LoadBalancerStatus{`, + `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LocalVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NFSVolumeSource{`, + `Server:` + fmt.Sprintf("%v", this.Server) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Namespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Namespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAddress{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "NodeSelector", "NodeSelector", 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastHeartbeatTime:` + strings.Replace(strings.Replace(this.LastHeartbeatTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDaemonEndpoints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDaemonEndpoints{`, + `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Node", "Node", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResources) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&NodeResources{`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelector{`, + `NodeSelectorTerms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NodeSelectorTerms), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorTerm{`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSpec{`, + `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, + `ExternalID:` + fmt.Sprintf("%v", this.ExternalID) + `,`, + `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, + `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, + `Taints:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + keysForAllocatable := make([]string, 0, len(this.Allocatable)) + for k := range this.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + mapStringForAllocatable := "ResourceList{" + for _, k := range keysForAllocatable { + mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + } + 
mapStringForAllocatable += "}" + s := strings.Join([]string{`&NodeStatus{`, + `Capacity:` + mapStringForCapacity + `,`, + `Allocatable:` + mapStringForAllocatable + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + `,`, + `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + `,`, + `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, + `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, + `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + `,`, + `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, + `VolumesAttached:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumesAttached), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSystemInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSystemInfo{`, + `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, + `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, + `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, + `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, + `OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`, + `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, + `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, + `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, + `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectFieldSelector{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMeta) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ObjectMeta{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + 
`Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1), `&`, ``, 1) + `,`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, + `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "k8s_io_apimachinery_pkg_apis_meta_v1.Initializers", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolume{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, + `AccessModes:` + fmt.Sprintf("%v", 
this.AccessModes) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimStatus) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, + `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeSource{`, + `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `FlexVolume:` + 
strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSpec) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeSpec{`, + `Capacity:` + mapStringForCapacity + `,`, + `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `ClaimRef:` + strings.Replace(fmt.Sprintf("%v", this.ClaimRef), "ObjectReference", "ObjectReference", 1) + `,`, + `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `}`, + }, "") + return s +} +func (this *PhotonPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, + `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *Pod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Pod{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinityTerm{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `}`, + }, "") + return s +} +func (this *PodAntiAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAntiAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAttachOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAttachOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *PodCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PodExecOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodExecOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *PodList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Pod", "Pod", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `}`, + }, "") + return s +} +func (this *PodPortForwardOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPortForwardOptions{`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `}`, + }, "") + return s +} +func (this *PodProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityContext{`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, + `}`, + }, "") + return s +} +func (this *PodSignature) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSignature{`, + `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSpec) String() string { + if this == nil { + return "nil" + } + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&PodSpec{`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "Volume", 1), `&`, ``, 1) + `,`, + `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", 
this.ServiceAccountName) + `,`, + `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "Container", "Container", 1), `&`, ``, 1) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "Toleration", 1), `&`, ``, 1) + `,`, + `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "HostAlias", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PodCondition", "PodCondition", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `ContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, + `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, + `InitContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodStatusResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatusResult{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateList) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&PodTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortworxVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortworxVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *PreferAvoidPodsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferAvoidPodsEntry{`, + `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, + `EvictionTime:` + strings.Replace(strings.Replace(this.EvictionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PreferredSchedulingTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferredSchedulingTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Probe) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Probe{`, + `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, + `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, + `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectedVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectedVolumeSource{`, + `Sources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sources), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *QuobyteVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuobyteVolumeSource{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, + 
`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `}`, + }, "") + return s +} +func (this *RBDVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationController) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationController{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ReplicationControllerSpec{`, + 
`Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceFieldSelector{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + s := strings.Join([]string{`&ResourceQuotaSpec{`, + `Hard:` + mapStringForHard + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: 
%v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + keysForUsed := make([]string, 0, len(this.Used)) + for k := range this.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + mapStringForUsed := "ResourceList{" + for _, k := range keysForUsed { + mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + } + mapStringForUsed += "}" + s := strings.Join([]string{`&ResourceQuotaStatus{`, + `Hard:` + mapStringForHard + `,`, + `Used:` + mapStringForUsed + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxOptions{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string][]byte{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForStringData := make([]string, 0, len(this.StringData)) + for k := range this.StringData { + keysForStringData = append(keysForStringData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + mapStringForStringData := "map[string]string{" + for _, k := range 
keysForStringData { + mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + } + mapStringForStringData += "}" + s := strings.Join([]string{`&Secret{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `StringData:` + mapStringForStringData + `,`, + `}`, + }, "") + return s +} +func (this *SecretEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Secret", "Secret", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SecretProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityContext{`, + `Capabilities:` + strings.Replace(fmt.Sprintf("%v", this.Capabilities), "Capabilities", "Capabilities", 1) + `,`, + `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, + `}`, + }, "") + return s +} +func (this *SerializedReference) String() string { + if this == nil { + return "nil" + } 
+ s := strings.Join([]string{`&SerializedReference{`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccount{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Service", "Service", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServicePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServicePort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + 
mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ServiceSpec{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ServicePort", "ServicePort", 1), `&`, ``, 1) + `,`, + `Selector:` + mapStringForSelector + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, + `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, + `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, + `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, + `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "ObjectReference", "ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Sysctl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sysctl{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TCPSocketAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TCPSocketAction{`, + `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *Taint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Taint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(strings.Replace(this.TimeAdded.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Toleration{`, 
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMount{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeProjection{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, + `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeSource{`, + `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, + `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `GitRepo:` + strings.Replace(fmt.Sprintf("%v", this.GitRepo), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `PersistentVolumeClaim:` + strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaim), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), 
"FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, + `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VsphereVirtualDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, + `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, + `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPodAffinityTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Affinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Affinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &NodeAffinity{} + } + if err := 
m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = &PodAffinity{} + } + if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachedVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AvoidPods) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) + if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.DiskName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataDiskURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) + m.CachingMode = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) + m.Kind = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Binding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Binding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Capabilities) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ComponentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMap) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var 
stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Data == nil { + m.Data = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Data[mapkey] = mapvalue + } else { + var mapvalue string + m.Data[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ConfigMap{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
(int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Container) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerImage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + } + m.SizeBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SizeBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostPort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerPort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Waiting 
== nil { + m.Waiting = &ContainerStateWaiting{} + } + if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Running == nil { + m.Running = &ContainerStateRunning{} + } + if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = &ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ContainerStateTerminated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitCode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + m.Signal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Signal |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ready = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) + } + m.RestartCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RestartCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GracePeriodSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Preconditions == nil { + m.Preconditions = &Preconditions{} + } + if err := m.Preconditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OrphanDependents = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := DeletionPropagation(dAtA[iNdEx:postIndex]) + m.PropagationPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for 
iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSubset) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Endpoints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} + } + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} + } + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVarSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapKeyRef == 
nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.InvolvedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Component", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Component = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
} + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Lun = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + 
iNdEx = postStringIndexmapkey + if m.Options == nil { + m.Options = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Options[mapkey] = mapvalue + } else { + var mapvalue string + m.Options[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatasetName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatasetUUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PDName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Directory = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointsName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scheme = URIScheme(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HTTPHeaders = append(m.HTTPHeaders, HTTPHeader{}) + if err := m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Handler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Handler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exec == nil { + m.Exec = &ExecAction{} + } + if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTPGet == nil { + m.HTTPGet = &HTTPGetAction{} + } + if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCPSocket == nil { + m.TCPSocket = &TCPSocketAction{} + } + if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostAlias) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostAlias: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostAlias: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostnames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostnames = append(m.Hostnames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
HostPathVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lun |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyToPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Lifecycle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 
{ + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PostStart == nil { + m.PostStart = &Handler{} + } + if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreStop == nil { + m.PreStop = &Handler{} + } + if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Max == nil { + m.Max = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Max[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Max[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Min == nil { + m.Min = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Min[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + 
m.Min[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Default == nil { + m.Default = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Default[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Default[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.DefaultRequest == nil { + m.DefaultRequest = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.DefaultRequest[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.DefaultRequest[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLimitRequestRatio", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.MaxLimitRequestRatio == nil { + m.MaxLimitRequestRatio = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.MaxLimitRequestRatio[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, LimitRange{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limits = append(m.Limits, LimitRangeItem{}) + if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *List) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: List: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Watch = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", 
wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeUninitialized = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LoadBalancerIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, LoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Server = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Namespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Namespace{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, FinalizerName(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NamespacePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Node) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NodeAddressType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequiredDuringSchedulingIgnoredDuringExecution == nil { + m.RequiredDuringSchedulingIgnoredDuringExecution = &NodeSelector{} + } + if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, PreferredSchedulingTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NodeConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeartbeatTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastHeartbeatTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KubeletEndpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Node{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.NodeSelectorTerms = append(m.NodeSelectorTerms, NodeSelectorTerm{}) + if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = NodeSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*NodeSelectorTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, NodeSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unschedulable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Unschedulable = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Taints = append(m.Taints, Taint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Allocatable == nil { + m.Allocatable = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Allocatable[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Allocatable[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NodePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, NodeCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, NodeAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DaemonEndpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.DaemonEndpoints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ContainerImage{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumesInUse", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumesAttached", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumesAttached = append(m.VolumesAttached, AttachedVolume{}) + if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSystemInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSystemInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MachineID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemUUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SystemUUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BootID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BootID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KernelVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OSImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field ContainerRuntimeVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerRuntimeVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeletVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeProxyVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeProxyVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OperatingSystem", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OperatingSystem = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectFieldSelector) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeletionTimestamp == nil { + m.DeletionTimestamp = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DeletionGracePeriodSeconds = &v + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Annotations[mapkey] = mapvalue + } else { + var mapvalue string + m.Annotations[mapkey] = mapvalue + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Initializers == nil { + m.Initializers = &k8s_io_apimachinery_pkg_apis_meta_v1.Initializers{} + } + if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PersistentVolumeClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StorageClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumeClaimPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClaimName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PersistentVolume{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCEPersistentDisk == nil { + m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
AWSElasticBlockStore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HostPath == nil { + m.HostPath = &HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Glusterfs == nil { + m.Glusterfs = &GlusterfsVolumeSource{} + } + if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NFS == nil { + m.NFS = &NFSVolumeSource{} + } + if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RBD == nil { + m.RBD = &RBDVolumeSource{} + } + if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ISCSI == nil { + m.ISCSI = &ISCSIVolumeSource{} + } + if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cinder == nil { + m.Cinder = &CinderVolumeSource{} + } + if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CephFS == nil { + m.CephFS = &CephFSVolumeSource{} + } + if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FC == nil { + m.FC = &FCVolumeSource{} + } + if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flocker == nil { + m.Flocker = &FlockerVolumeSource{} + } + if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FlexVolume == nil { + m.FlexVolume = &FlexVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureFile == nil { + m.AzureFile = &AzureFileVolumeSource{} + } + if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VsphereVolume == nil { + m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quobyte == nil { + m.Quobyte = &QuobyteVolumeSource{} + } + if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureDisk == nil { + m.AzureDisk = &AzureDiskVolumeSource{} + } + if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PhotonPersistentDisk == nil { + m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} + } + if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Local == nil { + m.Local = &LocalVolumeSource{} + } + if err := m.Local.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StorageOS == nil { + m.StorageOS = &StorageOSPersistentVolumeSource{} + } + if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PersistentVolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClaimRef == nil { + m.ClaimRef = &ObjectReference{} + } + if err := m.ClaimRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field PdID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PdID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Pod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + 
if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PodConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodExecOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + 
if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Pod{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodLogOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodLogOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPortForwardOptions) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPortForwardOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPortForwardOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v k8s_io_apimachinery_pkg_types.UnixUserID + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (k8s_io_apimachinery_pkg_types.UnixUserID(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 4: + if wireType == 0 { + var v k8s_io_apimachinery_pkg_types.UnixGroupID + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (k8s_io_apimachinery_pkg_types.UnixGroupID(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v k8s_io_apimachinery_pkg_types.UnixGroupID + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (k8s_io_apimachinery_pkg_types.UnixGroupID(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var v k8s_io_apimachinery_pkg_types.UnixGroupID + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (k8s_io_apimachinery_pkg_types.UnixGroupID(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FSGroup = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSignature) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSignature: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSignature: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodController", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodController == nil { + m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} + } + if err := m.PodController.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire) 
+ } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, Container{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = RestartPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TerminationGracePeriodSeconds = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSPolicy = DNSPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.NodeSelector[mapkey] = mapvalue + } else { + var mapvalue string + m.NodeSelector[mapkey] = mapvalue + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServiceAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedServiceAccount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Affinity == nil { + m.Affinity = &Affinity{} + } + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchedulerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainers = append(m.InitContainers, Container{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostAliases = append(m.HostAliases, HostAlias{}) + if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PodPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PodCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = 
&k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{}) + if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QOSClass = PodQOSClass(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{}) + if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodStatusResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodStatusResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodStatusResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortworxVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortworxVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferAvoidPodsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferAvoidPodsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSignature", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSignature.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EvictionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferredSchedulingTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferredSchedulingTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Preference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Probe) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Probe: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Probe: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Handler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialDelaySeconds", wireType) + } + m.InitialDelaySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + m.TimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) + } + m.PeriodSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PeriodSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessThreshold", wireType) + } + m.SuccessThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SuccessThreshold |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType) + } + m.FailureThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FailureThreshold |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectedVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectedVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sources = append(m.Sources, VolumeProjection{}) + if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuobyteVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuobyteVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Registry = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volume = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RBDVolumeSource: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RBDVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CephMonitors = append(m.CephMonitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDPool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RadosUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyring = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeAllocation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationController) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationController: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationController: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicationControllerConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicationController{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Selector == nil { + m.Selector = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = 
postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &PodTemplateSpec{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: 
wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicationControllerCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Hard == nil { + m.Hard = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Hard[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Hard[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, ResourceQuotaScope(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ResourceQuotaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Hard == nil { + m.Hard = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Hard[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Hard[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Used == nil { + m.Used = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Used[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Used[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Limits == nil { + m.Limits = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Limits[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Limits[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Requests == nil { + m.Requests = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Requests[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Requests[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleIOVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleIOVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.System = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SSLEnabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProtectionDomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageMode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + 
} + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Data == nil { + m.Data = make(map[string][]byte) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + m.Data[mapkey] = mapvalue + } else { + var mapvalue []byte + m.Data[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SecretType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if 
intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.StringData == nil { + m.StringData = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.StringData[mapkey] = mapvalue + } else { + var mapvalue string + m.StringData[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretEnvSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretList: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Secret{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, 
KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capabilities == nil { + m.Capabilities = &Capabilities{} + } + if err := m.Capabilities.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Privileged = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v k8s_io_apimachinery_pkg_types.UnixUserID + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (k8s_io_apimachinery_pkg_types.UnixUserID(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnlyRootFilesystem = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SerializedReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, ObjectReference{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ServiceAccount{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Service{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServicePort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServicePort: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) + } + m.NodePort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodePort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ServicePort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Selector == nil { + m.Selector = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ServiceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field SessionAffinity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionAffinity = ServiceAffinity(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) + } + m.HealthCheckNodePort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HealthCheckNodePort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageOSPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageOSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field VolumeNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &ObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageOSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageOSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sysctl) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sysctl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sysctl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
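// The decoded varint length is converted to int; a negative result signals overflow and is rejected before the buffer is sliced for the Host string.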
if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Taint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Taint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Toleration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Toleration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = TolerationOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TolerationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeMount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeProjection: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &SecretProjection{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIProjection{} + } + if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapProjection{} + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HostPath == nil { + m.HostPath = &HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmptyDir", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EmptyDir == nil { + m.EmptyDir = &EmptyDirVolumeSource{} + } + if err := m.EmptyDir.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCEPersistentDisk == nil { + m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitRepo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitRepo == nil { + m.GitRepo = &GitRepoVolumeSource{} + } + if err := m.GitRepo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &SecretVolumeSource{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NFS == nil { + m.NFS = &NFSVolumeSource{} + } + if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ISCSI == nil { + m.ISCSI = &ISCSIVolumeSource{} + } + if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Glusterfs == nil { + m.Glusterfs = &GlusterfsVolumeSource{} + } + if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaim", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PersistentVolumeClaim == nil { + m.PersistentVolumeClaim = &PersistentVolumeClaimVolumeSource{} + } + if err := m.PersistentVolumeClaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + if m.RBD == nil { + m.RBD = &RBDVolumeSource{} + } + if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FlexVolume == nil { + m.FlexVolume = &FlexVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cinder == nil { + m.Cinder = &CinderVolumeSource{} + } + if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CephFS == nil { + m.CephFS = &CephFSVolumeSource{} + } + if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flocker == nil { + m.Flocker = &FlockerVolumeSource{} + } + if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIVolumeSource{} + } + if err := 
m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FC == nil { + m.FC = &FCVolumeSource{} + } + if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureFile == nil { + m.AzureFile = &AzureFileVolumeSource{} + } + if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapVolumeSource{} + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VsphereVolume == nil { + m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quobyte == nil { + m.Quobyte = &QuobyteVolumeSource{} + } + if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureDisk == nil { + m.AzureDisk = &AzureDiskVolumeSource{} + } + if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PhotonPersistentDisk == nil { + m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} + } + if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Projected", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Projected == nil { + m.Projected = &ProjectedVolumeSource{} + } + if err := m.Projected.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StorageOS == nil { + m.StorageOS = &StorageOSVolumeSource{} + } + if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePolicyID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinityTerm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodAffinityTerm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/client-go/pkg/api/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 11467 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x8c, 0x24, 0xc7, + 0x75, 0x98, 0x7a, 0x66, 0xf6, 0x63, 0xde, 0x7e, 0xd7, 0xed, 0x1d, 0x97, 0x2b, 0xf2, 0xf6, 0xd8, + 0x14, 0xe9, 0x23, 0x79, 0xdc, 0xd3, 0x1d, 0x49, 0xf1, 0x24, 0xca, 0xb4, 0x76, 0x77, 0x76, 0xef, + 0xd6, 0xf7, 0x35, 0xac, 0xd9, 0xbb, 0xa3, 0x28, 0x46, 0x64, 0xdf, 0x74, 0xed, 0x6e, 0xf3, 0x66, + 0xbb, 0x87, 0xdd, 0x3d, 0x7b, 0xb7, 0x34, 0x0c, 0xd8, 0x8a, 0x60, 0x2b, 0x80, 0x92, 0xc8, 0x70, + 0x04, 0x04, 0x4e, 0x00, 0x05, 0x06, 0xe2, 0x28, 0xdf, 0x56, 0x04, 0x7d, 0x18, 0x96, 0x13, 0xc4, + 0xb1, 0x1c, 0x39, 0x48, 0x1c, 0x03, 0x46, 0x6c, 0x05, 0x86, 0xd7, 0xd6, 0x0a, 0xf1, 0xbf, 0x04, + 0x41, 0xf2, 0x6f, 0xf3, 0x81, 0xa0, 0x3e, 0xbb, 0xaa, 0xa7, 0x67, 0xbb, 0x67, 0x79, 0xbb, 0xa6, + 0x84, 0xfc, 0x9b, 0xa9, 0xf7, 0xea, 0x55, 0x75, 0x7d, 0xbc, 0x7a, 0xef, 0xd5, 0x7b, 0xaf, 0xe0, + 0xdc, 0xbd, 0x4b, 0xd1, 0xbc, 0x17, 0x9c, 0xbf, 0xd7, 0xb9, 0x4b, 0x42, 0x9f, 0xc4, 0x24, 0x3a, + 0xdf, 0xbe, 0xb7, 0x71, 0xde, 0x69, 0x7b, 0xe7, 0xb7, 0x2f, 0x9c, 0xdf, 0x20, 0x3e, 0x09, 0x9d, + 0x98, 0xb8, 0xf3, 0xed, 0x30, 0x88, 0x03, 0xf4, 0x18, 0xc7, 0x9e, 0x4f, 0xb0, 0xe7, 0xdb, 0xf7, + 0x36, 0xe6, 0x9d, 0xb6, 0x37, 0xbf, 0x7d, 0x61, 0xf6, 0xf9, 0x0d, 0x2f, 0xde, 0xec, 0xdc, 0x9d, + 0x6f, 0x06, 0x5b, 0xe7, 0x37, 0x82, 0x8d, 0xe0, 0x3c, 0xab, 0x74, 0xb7, 0xb3, 0xce, 0xfe, 0xb1, + 0x3f, 0xec, 0x17, 0x27, 0x36, 0xfb, 0xa2, 0x68, 0xda, 0x69, 0x7b, 0x5b, 0x4e, 0x73, 0xd3, 0xf3, + 0x49, 0xb8, 0xa3, 0x1a, 0x0f, 0x49, 0x14, 0x74, 0xc2, 0x26, 0x49, 0x77, 0xe1, 0xc0, 0x5a, 0xd1, + 0xf9, 0x2d, 0x12, 0x3b, 0x19, 0x1d, 0x9f, 0x3d, 0xdf, 0xab, 0x56, 0xd8, 0xf1, 0x63, 0x6f, 0xab, + 0xbb, 0x99, 0x8f, 0xe5, 0x55, 0x88, 0x9a, 0x9b, 0x64, 0xcb, 0xe9, 0xaa, 0xf7, 0x42, 0xaf, 0x7a, + 
0x9d, 0xd8, 0x6b, 0x9d, 0xf7, 0xfc, 0x38, 0x8a, 0xc3, 0x74, 0x25, 0xfb, 0x8f, 0x2d, 0x38, 0xb3, + 0x70, 0xa7, 0xb1, 0xdc, 0x72, 0xa2, 0xd8, 0x6b, 0x2e, 0xb6, 0x82, 0xe6, 0xbd, 0x46, 0x1c, 0x84, + 0xe4, 0x76, 0xd0, 0xea, 0x6c, 0x91, 0x06, 0x1b, 0x08, 0x74, 0x0e, 0x86, 0xb7, 0xd9, 0xff, 0xd5, + 0xda, 0x8c, 0x75, 0xc6, 0x3a, 0x5b, 0x5d, 0x9c, 0xfc, 0xde, 0xee, 0xdc, 0x87, 0xf6, 0x76, 0xe7, + 0x86, 0x6f, 0x8b, 0x72, 0xac, 0x30, 0xd0, 0xd3, 0x30, 0xb8, 0x1e, 0xad, 0xed, 0xb4, 0xc9, 0x4c, + 0x89, 0xe1, 0x8e, 0x0b, 0xdc, 0xc1, 0x95, 0x06, 0x2d, 0xc5, 0x02, 0x8a, 0xce, 0x43, 0xb5, 0xed, + 0x84, 0xb1, 0x17, 0x7b, 0x81, 0x3f, 0x53, 0x3e, 0x63, 0x9d, 0x1d, 0x58, 0x9c, 0x12, 0xa8, 0xd5, + 0xba, 0x04, 0xe0, 0x04, 0x87, 0x76, 0x23, 0x24, 0x8e, 0x7b, 0xd3, 0x6f, 0xed, 0xcc, 0x54, 0xce, + 0x58, 0x67, 0x87, 0x93, 0x6e, 0x60, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0xed, 0x12, 0x0c, 0x2f, 0xac, + 0xaf, 0x7b, 0xbe, 0x17, 0xef, 0xa0, 0xb7, 0x61, 0xd4, 0x0f, 0x5c, 0x22, 0xff, 0xb3, 0xaf, 0x18, + 0xb9, 0xf8, 0xec, 0xfc, 0x41, 0x8b, 0x6a, 0xfe, 0x86, 0x56, 0x63, 0x71, 0x72, 0x6f, 0x77, 0x6e, + 0x54, 0x2f, 0xc1, 0x06, 0x45, 0xf4, 0x26, 0x8c, 0xb4, 0x03, 0x57, 0x35, 0x50, 0x62, 0x0d, 0x3c, + 0x73, 0x70, 0x03, 0xf5, 0xa4, 0xc2, 0xe2, 0xc4, 0xde, 0xee, 0xdc, 0x88, 0x56, 0x80, 0x75, 0x72, + 0xa8, 0x05, 0x13, 0xf4, 0xaf, 0x1f, 0x7b, 0xaa, 0x85, 0x32, 0x6b, 0xe1, 0xf9, 0xfc, 0x16, 0xb4, + 0x4a, 0x8b, 0x27, 0xf6, 0x76, 0xe7, 0x26, 0x52, 0x85, 0x38, 0x4d, 0xda, 0x7e, 0x0f, 0xc6, 0x17, + 0xe2, 0xd8, 0x69, 0x6e, 0x12, 0x97, 0xcf, 0x2f, 0x7a, 0x11, 0x2a, 0xbe, 0xb3, 0x45, 0xc4, 0xec, + 0x9f, 0x11, 0xc3, 0x5e, 0xb9, 0xe1, 0x6c, 0x91, 0xfd, 0xdd, 0xb9, 0xc9, 0x5b, 0xbe, 0xf7, 0x6e, + 0x47, 0xac, 0x19, 0x5a, 0x86, 0x19, 0x36, 0xba, 0x08, 0xe0, 0x92, 0x6d, 0xaf, 0x49, 0xea, 0x4e, + 0xbc, 0x29, 0x56, 0x03, 0x12, 0x75, 0xa1, 0xa6, 0x20, 0x58, 0xc3, 0xb2, 0x3f, 0x67, 0x41, 0x75, + 0x61, 0x3b, 0xf0, 0xdc, 0x7a, 0xe0, 0x46, 0xa8, 0x03, 0x13, 0xed, 0x90, 0xac, 0x93, 0x50, 0x15, + 0xcd, 0x58, 0x67, 0xca, 0x67, 0x47, 0x2e, 0x5e, 0xcc, 0xf9, 0x6e, 0xb3, 0xd2, 0xb2, 0x1f, 0x87, + 0x3b, 0x8b, 0x8f, 0x88, 0xa6, 0x27, 0x52, 0x50, 0x9c, 0x6e, 0xc3, 0xfe, 0x9d, 0x12, 0x9c, 0x5c, + 0x78, 0xaf, 0x13, 0x92, 0x9a, 0x17, 0xdd, 0x4b, 0x6f, 0x05, 0xd7, 0x8b, 0xee, 0xdd, 0x48, 0x06, + 0x43, 0xad, 0xc1, 0x9a, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x0f, 0x43, 0xf4, 0xf7, 0x2d, 0xbc, 0x2a, + 0xbe, 0xfe, 0x84, 0x40, 0x1e, 0xa9, 0x39, 0xb1, 0x53, 0xe3, 0x20, 0x2c, 0x71, 0xd0, 0x75, 0x18, + 0x69, 0xb2, 0x9d, 0xbb, 0x71, 0x3d, 0x70, 0x09, 0x9b, 0xe1, 0xea, 0xe2, 0x73, 0x14, 0x7d, 0x29, + 0x29, 0xde, 0xdf, 0x9d, 0x9b, 0xe1, 0x7d, 0x13, 0x24, 0x34, 0x18, 0xd6, 0xeb, 0x23, 0x5b, 0x6d, + 0xc4, 0x0a, 0xa3, 0x04, 0x19, 0x9b, 0xf0, 0xac, 0xb6, 0xa7, 0x06, 0xd8, 0x9e, 0x1a, 0xcd, 0xde, + 0x4f, 0xe8, 0x02, 0x54, 0xee, 0x79, 0xbe, 0x3b, 0x33, 0xc8, 0x68, 0x3d, 0x4e, 0xa7, 0xff, 0xaa, + 0xe7, 0xbb, 0xfb, 0xbb, 0x73, 0x53, 0x46, 0x77, 0x68, 0x21, 0x66, 0xa8, 0xf6, 0x3f, 0xb2, 0xc4, + 0x30, 0xae, 0x78, 0x2d, 0x93, 0xa3, 0x5c, 0x04, 0x88, 0x48, 0x33, 0x24, 0xb1, 0x36, 0x90, 0x6a, + 0x65, 0x34, 0x14, 0x04, 0x6b, 0x58, 0x94, 0x5f, 0x44, 0x9b, 0x4e, 0xc8, 0x16, 0x98, 0x18, 0x4e, + 0xc5, 0x2f, 0x1a, 0x12, 0x80, 0x13, 0x1c, 0x83, 0x5f, 0x94, 0x73, 0xf9, 0xc5, 0x6f, 0x5b, 0x30, + 0xb4, 0xe8, 0xf9, 0xae, 0xe7, 0x6f, 0xa0, 0xb7, 0x61, 0x98, 0xb2, 0x73, 0xd7, 0x89, 0x1d, 0xc1, + 0x2a, 0x3e, 0x2a, 0xd7, 0x9b, 0xce, 0x5d, 0xe5, 0x8a, 0x8b, 0xe6, 0x29, 0x36, 0x5d, 0x77, 0x37, + 0xef, 0xbe, 0x43, 0x9a, 0xf1, 0x75, 0x12, 0x3b, 0xc9, 0xe7, 0x24, 0x65, 0x58, 0x51, 0x45, 0xb7, + 0x60, 0x30, 0x76, 0xc2, 
0x0d, 0x12, 0x0b, 0x4e, 0x91, 0xb3, 0x8f, 0x39, 0x0d, 0x4c, 0x57, 0x29, + 0xf1, 0x9b, 0x24, 0xe1, 0xa9, 0x6b, 0x8c, 0x08, 0x16, 0xc4, 0xec, 0x26, 0x8c, 0x2e, 0x39, 0x6d, + 0xe7, 0xae, 0xd7, 0xf2, 0x62, 0x8f, 0x44, 0xe8, 0x27, 0xa0, 0xec, 0xb8, 0x2e, 0xdb, 0x33, 0xd5, + 0xc5, 0x93, 0x7b, 0xbb, 0x73, 0xe5, 0x05, 0x97, 0x4e, 0x19, 0x28, 0xac, 0x1d, 0x4c, 0x31, 0xd0, + 0xb3, 0x50, 0x71, 0xc3, 0xa0, 0x3d, 0x53, 0x62, 0x98, 0xa7, 0xe8, 0xec, 0xd6, 0xc2, 0xa0, 0x9d, + 0x42, 0x65, 0x38, 0xf6, 0x77, 0x4b, 0x80, 0x96, 0x48, 0x7b, 0x73, 0xa5, 0x61, 0xcc, 0xe9, 0x59, + 0x18, 0xde, 0x0a, 0x7c, 0x2f, 0x0e, 0xc2, 0x48, 0x34, 0xc8, 0x96, 0xd2, 0x75, 0x51, 0x86, 0x15, + 0x14, 0x9d, 0x81, 0x4a, 0x3b, 0xe1, 0x08, 0xa3, 0x92, 0x9b, 0x30, 0x5e, 0xc0, 0x20, 0x14, 0xa3, + 0x13, 0x91, 0x50, 0x6c, 0x01, 0x85, 0x71, 0x2b, 0x22, 0x21, 0x66, 0x90, 0x64, 0x05, 0xd1, 0xb5, + 0x25, 0x16, 0x78, 0x6a, 0x05, 0x51, 0x08, 0xd6, 0xb0, 0xd0, 0x5b, 0x50, 0xe5, 0xff, 0x30, 0x59, + 0x67, 0xab, 0x3d, 0x97, 0x8f, 0x5c, 0x0b, 0x9a, 0x4e, 0x2b, 0x3d, 0xf8, 0x63, 0x6c, 0xc5, 0x49, + 0x42, 0x38, 0xa1, 0x69, 0xac, 0xb8, 0xc1, 0xdc, 0x15, 0xf7, 0xb7, 0x2d, 0x40, 0x4b, 0x9e, 0xef, + 0x92, 0xf0, 0x18, 0x4e, 0xdb, 0xfe, 0x36, 0xc3, 0x9f, 0xd0, 0xae, 0x05, 0x5b, 0xed, 0xc0, 0x27, + 0x7e, 0xbc, 0x14, 0xf8, 0x2e, 0x3f, 0x81, 0x3f, 0x01, 0x95, 0x98, 0x36, 0xc5, 0xbb, 0xf5, 0xb4, + 0x9c, 0x16, 0xda, 0xc0, 0xfe, 0xee, 0xdc, 0xa9, 0xee, 0x1a, 0xac, 0x0b, 0xac, 0x0e, 0xfa, 0x38, + 0x0c, 0x46, 0xb1, 0x13, 0x77, 0x22, 0xd1, 0xd1, 0x27, 0x64, 0x47, 0x1b, 0xac, 0x74, 0x7f, 0x77, + 0x6e, 0x42, 0x55, 0xe3, 0x45, 0x58, 0x54, 0x40, 0xcf, 0xc0, 0xd0, 0x16, 0x89, 0x22, 0x67, 0x43, + 0xf2, 0xc4, 0x09, 0x51, 0x77, 0xe8, 0x3a, 0x2f, 0xc6, 0x12, 0x8e, 0x9e, 0x84, 0x01, 0x12, 0x86, + 0x41, 0x28, 0x56, 0xc4, 0x98, 0x40, 0x1c, 0x58, 0xa6, 0x85, 0x98, 0xc3, 0xec, 0xff, 0x6c, 0xc1, + 0x84, 0xea, 0x2b, 0x6f, 0xeb, 0x18, 0xb6, 0xbc, 0x0b, 0xd0, 0x94, 0x1f, 0x18, 0xb1, 0x8d, 0xa6, + 0xb5, 0x91, 0xbd, 0xfc, 0xba, 0x07, 0x34, 0x69, 0x43, 0x15, 0x45, 0x58, 0xa3, 0x6b, 0xff, 0x5b, + 0x0b, 0x4e, 0xa4, 0xbe, 0xed, 0x9a, 0x17, 0xc5, 0xe8, 0xcd, 0xae, 0xef, 0x9b, 0x2f, 0xf6, 0x7d, + 0xb4, 0x36, 0xfb, 0x3a, 0xb5, 0x5e, 0x64, 0x89, 0xf6, 0x6d, 0x18, 0x06, 0xbc, 0x98, 0x6c, 0xc9, + 0xcf, 0x7a, 0xbe, 0xe0, 0x67, 0xf1, 0xfe, 0x25, 0xb3, 0xb4, 0x4a, 0x69, 0x60, 0x4e, 0xca, 0xfe, + 0x5f, 0x16, 0x54, 0x97, 0x02, 0x7f, 0xdd, 0xdb, 0xb8, 0xee, 0xb4, 0x8f, 0x61, 0x7e, 0x1a, 0x50, + 0x61, 0xd4, 0xf9, 0x27, 0x5c, 0xc8, 0xfb, 0x04, 0xd1, 0xb1, 0x79, 0x7a, 0xee, 0x71, 0xf9, 0x42, + 0xb1, 0x29, 0x5a, 0x84, 0x19, 0xb1, 0xd9, 0x97, 0xa1, 0xaa, 0x10, 0xd0, 0x24, 0x94, 0xef, 0x11, + 0x2e, 0x7c, 0x56, 0x31, 0xfd, 0x89, 0xa6, 0x61, 0x60, 0xdb, 0x69, 0x75, 0xc4, 0xe6, 0xc5, 0xfc, + 0xcf, 0x27, 0x4a, 0x97, 0x2c, 0xfb, 0xbb, 0x6c, 0x07, 0x8a, 0x46, 0x96, 0xfd, 0x6d, 0xc1, 0x1c, + 0x3e, 0x6f, 0xc1, 0x74, 0x2b, 0x83, 0x29, 0x89, 0x31, 0x39, 0x0c, 0x3b, 0x7b, 0x4c, 0x74, 0x7b, + 0x3a, 0x0b, 0x8a, 0x33, 0x5b, 0xa3, 0xbc, 0x3e, 0x68, 0xd3, 0x05, 0xe7, 0xb4, 0x58, 0xd7, 0x85, + 0xd8, 0x70, 0x53, 0x94, 0x61, 0x05, 0xb5, 0xff, 0xc2, 0x82, 0x69, 0xf5, 0x1d, 0x57, 0xc9, 0x4e, + 0x83, 0xb4, 0x48, 0x33, 0x0e, 0xc2, 0x0f, 0xca, 0x97, 0x3c, 0xce, 0xe7, 0x84, 0xf3, 0xa4, 0x11, + 0x41, 0xa0, 0x7c, 0x95, 0xec, 0xf0, 0x09, 0xd2, 0x3f, 0xb4, 0x7c, 0xe0, 0x87, 0xfe, 0xa6, 0x05, + 0x63, 0xea, 0x43, 0x8f, 0x61, 0xcb, 0x5d, 0x33, 0xb7, 0xdc, 0x4f, 0x14, 0x5c, 0xaf, 0x3d, 0x36, + 0xdb, 0xdf, 0x2a, 0x51, 0xb6, 0x21, 0x70, 0xea, 0x61, 0x40, 0x07, 0x89, 0x72, 0xfc, 0x0f, 0xc8, + 0x2c, 0xf5, 0xf7, 0xb1, 0x57, 0xc9, 0xce, 0x5a, 
0x40, 0xa5, 0x89, 0xec, 0x8f, 0x35, 0x26, 0xb5, + 0x72, 0xe0, 0xa4, 0xfe, 0x5e, 0x09, 0x4e, 0xaa, 0x61, 0x31, 0x4e, 0xe9, 0x1f, 0xcb, 0x81, 0xb9, + 0x00, 0x23, 0x2e, 0x59, 0x77, 0x3a, 0xad, 0x58, 0x29, 0x20, 0x03, 0x5c, 0x33, 0xad, 0x25, 0xc5, + 0x58, 0xc7, 0xe9, 0x63, 0x2c, 0xbf, 0x32, 0xc2, 0xf8, 0x79, 0xec, 0xd0, 0x55, 0x4f, 0x25, 0x3c, + 0x4d, 0xa3, 0x1c, 0xd5, 0x35, 0x4a, 0xa1, 0x3d, 0x3e, 0x09, 0x03, 0xde, 0x16, 0x3d, 0xf3, 0x4b, + 0xe6, 0x51, 0xbe, 0x4a, 0x0b, 0x31, 0x87, 0xa1, 0xa7, 0x60, 0xa8, 0x19, 0x6c, 0x6d, 0x39, 0xbe, + 0x3b, 0x53, 0x66, 0x32, 0xe7, 0x08, 0x15, 0x0b, 0x96, 0x78, 0x11, 0x96, 0x30, 0xf4, 0x18, 0x54, + 0x9c, 0x70, 0x23, 0x9a, 0xa9, 0x30, 0x9c, 0x61, 0xda, 0xd2, 0x42, 0xb8, 0x11, 0x61, 0x56, 0x4a, + 0x65, 0xc9, 0xfb, 0x41, 0x78, 0xcf, 0xf3, 0x37, 0x6a, 0x5e, 0xc8, 0x04, 0x43, 0x4d, 0x96, 0xbc, + 0xa3, 0x20, 0x58, 0xc3, 0x42, 0x75, 0x18, 0x68, 0x07, 0x61, 0x1c, 0xcd, 0x0c, 0xb2, 0x81, 0x7f, + 0x2e, 0x77, 0xfb, 0xf1, 0xef, 0xae, 0x07, 0x61, 0x9c, 0x7c, 0x0a, 0xfd, 0x17, 0x61, 0x4e, 0x08, + 0x2d, 0x41, 0x99, 0xf8, 0xdb, 0x33, 0x43, 0x8c, 0xde, 0x47, 0x0e, 0xa6, 0xb7, 0xec, 0x6f, 0xdf, + 0x76, 0xc2, 0x84, 0x5f, 0x2d, 0xfb, 0xdb, 0x98, 0xd6, 0x46, 0x4d, 0xa8, 0x4a, 0xfb, 0x55, 0x34, + 0x33, 0x5c, 0x64, 0x29, 0x62, 0x81, 0x8e, 0xc9, 0xbb, 0x1d, 0x2f, 0x24, 0x5b, 0xc4, 0x8f, 0xa3, + 0x44, 0xb1, 0x92, 0xd0, 0x08, 0x27, 0x74, 0x51, 0x13, 0x46, 0xb9, 0xfc, 0x79, 0x3d, 0xe8, 0xf8, + 0x71, 0x34, 0x53, 0x65, 0x5d, 0xce, 0x31, 0x76, 0xdc, 0x4e, 0x6a, 0x2c, 0x4e, 0x0b, 0xf2, 0xa3, + 0x5a, 0x61, 0x84, 0x0d, 0xa2, 0xe8, 0x4d, 0x18, 0x6b, 0x79, 0xdb, 0xc4, 0x27, 0x51, 0x54, 0x0f, + 0x83, 0xbb, 0x64, 0x06, 0xd8, 0xd7, 0x3c, 0x99, 0xa7, 0xf8, 0x07, 0x77, 0xc9, 0xe2, 0xd4, 0xde, + 0xee, 0xdc, 0xd8, 0x35, 0xbd, 0x36, 0x36, 0x89, 0xa1, 0xb7, 0x60, 0x9c, 0x0a, 0xbb, 0x5e, 0x42, + 0x7e, 0xa4, 0x38, 0x79, 0xb4, 0xb7, 0x3b, 0x37, 0x8e, 0x8d, 0xea, 0x38, 0x45, 0x0e, 0xad, 0x41, + 0xb5, 0xe5, 0xad, 0x93, 0xe6, 0x4e, 0xb3, 0x45, 0x66, 0x46, 0x19, 0xed, 0x9c, 0xcd, 0x79, 0x4d, + 0xa2, 0x73, 0x05, 0x43, 0xfd, 0xc5, 0x09, 0x21, 0x74, 0x1b, 0x4e, 0xc5, 0x24, 0xdc, 0xf2, 0x7c, + 0x87, 0x6e, 0x2a, 0x21, 0xfd, 0x32, 0xeb, 0xca, 0x18, 0x5b, 0xb5, 0xa7, 0xc5, 0xc0, 0x9e, 0x5a, + 0xcb, 0xc4, 0xc2, 0x3d, 0x6a, 0xa3, 0x9b, 0x30, 0xc1, 0xf6, 0x53, 0xbd, 0xd3, 0x6a, 0xd5, 0x83, + 0x96, 0xd7, 0xdc, 0x99, 0x19, 0x67, 0x04, 0x9f, 0x92, 0x36, 0x93, 0x55, 0x13, 0x4c, 0x15, 0xc3, + 0xe4, 0x1f, 0x4e, 0xd7, 0x46, 0x2d, 0x98, 0x88, 0x48, 0xb3, 0x13, 0x7a, 0xf1, 0x0e, 0x5d, 0xfb, + 0xe4, 0x41, 0x3c, 0x33, 0x51, 0x44, 0xd1, 0x6d, 0x98, 0x95, 0xb8, 0xc1, 0x2a, 0x55, 0x88, 0xd3, + 0xa4, 0x29, 0xab, 0x88, 0x62, 0xd7, 0xf3, 0x67, 0x26, 0x19, 0x07, 0x52, 0xfb, 0xab, 0x41, 0x0b, + 0x31, 0x87, 0x31, 0xfb, 0x01, 0xfd, 0x71, 0x93, 0x72, 0xe9, 0x29, 0x86, 0x98, 0xd8, 0x0f, 0x24, + 0x00, 0x27, 0x38, 0x54, 0x34, 0x88, 0xe3, 0x9d, 0x19, 0xc4, 0x50, 0xd5, 0x56, 0x5b, 0x5b, 0xfb, + 0x34, 0xa6, 0xe5, 0xe8, 0x36, 0x0c, 0x11, 0x7f, 0x7b, 0x25, 0x0c, 0xb6, 0x66, 0x4e, 0x14, 0xe1, + 0x01, 0xcb, 0x1c, 0x99, 0x9f, 0x1f, 0x89, 0x0a, 0x23, 0x8a, 0xb1, 0x24, 0x86, 0x1e, 0xc0, 0x4c, + 0xc6, 0x2c, 0xf1, 0x49, 0x99, 0x66, 0x93, 0xf2, 0x49, 0x51, 0x77, 0x66, 0xad, 0x07, 0xde, 0xfe, + 0x01, 0x30, 0xdc, 0x93, 0xba, 0x7d, 0x17, 0xc6, 0x15, 0xa3, 0x62, 0xf3, 0x8d, 0xe6, 0x60, 0x80, + 0xf2, 0x62, 0xa9, 0xd0, 0x57, 0xe9, 0xa0, 0x52, 0x16, 0x1d, 0x61, 0x5e, 0xce, 0x06, 0xd5, 0x7b, + 0x8f, 0x2c, 0xee, 0xc4, 0x84, 0x2b, 0x76, 0x65, 0x6d, 0x50, 0x25, 0x00, 0x27, 0x38, 0xf6, 0xff, + 0xe5, 0x62, 0x52, 0xc2, 0x0d, 0x0b, 0x9c, 0x04, 0xe7, 0x60, 0x78, 0x33, 
0x88, 0x62, 0x8a, 0xcd, + 0xda, 0x18, 0x48, 0x04, 0xa3, 0x2b, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x0a, 0x8c, 0x35, 0xf5, 0x06, + 0xc4, 0x31, 0x76, 0x52, 0x54, 0x31, 0x5b, 0xc7, 0x26, 0x2e, 0xba, 0x04, 0xc3, 0xcc, 0x30, 0xde, + 0x0c, 0x5a, 0x42, 0x85, 0x94, 0xa7, 0xf2, 0x70, 0x5d, 0x94, 0xef, 0x6b, 0xbf, 0xb1, 0xc2, 0xa6, + 0x8a, 0x38, 0xed, 0xc2, 0x6a, 0x5d, 0x1c, 0x20, 0x4a, 0x11, 0xbf, 0xc2, 0x4a, 0xb1, 0x80, 0xda, + 0xbf, 0x5e, 0xd2, 0x46, 0x99, 0x2a, 0x40, 0x04, 0xbd, 0x01, 0x43, 0xf7, 0x1d, 0x2f, 0xf6, 0xfc, + 0x0d, 0x21, 0x3d, 0xbc, 0x50, 0xf0, 0x34, 0x61, 0xd5, 0xef, 0xf0, 0xaa, 0xfc, 0xe4, 0x13, 0x7f, + 0xb0, 0x24, 0x48, 0x69, 0x87, 0x1d, 0xdf, 0xa7, 0xb4, 0x4b, 0xfd, 0xd3, 0xc6, 0xbc, 0x2a, 0xa7, + 0x2d, 0xfe, 0x60, 0x49, 0x10, 0xad, 0x03, 0xc8, 0xb5, 0x44, 0x5c, 0x61, 0x90, 0xfe, 0x58, 0x3f, + 0xe4, 0xd7, 0x54, 0xed, 0xc5, 0x71, 0x7a, 0xd6, 0x26, 0xff, 0xb1, 0x46, 0xd9, 0x8e, 0x99, 0x10, + 0xd6, 0xdd, 0x2d, 0xf4, 0x19, 0xba, 0xa5, 0x9d, 0x30, 0x26, 0xee, 0x42, 0x9c, 0xb6, 0xe9, 0x1f, + 0x2c, 0x62, 0xaf, 0x79, 0x5b, 0x44, 0xdf, 0xfe, 0x82, 0x08, 0x4e, 0xe8, 0xd9, 0xdf, 0x2a, 0xc3, + 0x4c, 0xaf, 0xee, 0xd2, 0x25, 0x49, 0x1e, 0x78, 0xf1, 0x12, 0x15, 0x93, 0x2c, 0x73, 0x49, 0x2e, + 0x8b, 0x72, 0xac, 0x30, 0xe8, 0xda, 0x88, 0xbc, 0x0d, 0xa9, 0x2c, 0x0d, 0x24, 0x6b, 0xa3, 0xc1, + 0x4a, 0xb1, 0x80, 0x52, 0xbc, 0x90, 0x38, 0x91, 0xb8, 0x0f, 0xd1, 0xd6, 0x10, 0x66, 0xa5, 0x58, + 0x40, 0x75, 0x83, 0x48, 0x25, 0xc7, 0x20, 0x62, 0x0c, 0xd1, 0xc0, 0xc3, 0x1d, 0x22, 0xf4, 0x59, + 0x80, 0x75, 0xcf, 0xf7, 0xa2, 0x4d, 0x46, 0x7d, 0xb0, 0x6f, 0xea, 0x4a, 0xc8, 0x5a, 0x51, 0x54, + 0xb0, 0x46, 0x11, 0xbd, 0x04, 0x23, 0x6a, 0x7b, 0xae, 0xd6, 0x66, 0x86, 0x4c, 0x1b, 0x7a, 0xc2, + 0xab, 0x6a, 0x58, 0xc7, 0xb3, 0xdf, 0x49, 0xaf, 0x17, 0xb1, 0x2b, 0xb4, 0xf1, 0xb5, 0x8a, 0x8e, + 0x6f, 0xe9, 0xe0, 0xf1, 0xb5, 0xff, 0xa8, 0x0c, 0x13, 0x46, 0x63, 0x9d, 0xa8, 0x00, 0x47, 0x7b, + 0x8d, 0x1e, 0x58, 0x4e, 0x4c, 0xc4, 0x9e, 0x3c, 0xd7, 0xcf, 0xa6, 0xd1, 0x8f, 0x37, 0xba, 0x17, + 0x38, 0x25, 0xb4, 0x09, 0xd5, 0x96, 0x13, 0x31, 0x93, 0x0a, 0x11, 0x7b, 0xb1, 0x3f, 0xb2, 0x89, + 0xfa, 0xe1, 0x44, 0xb1, 0x76, 0x7a, 0xf0, 0x56, 0x12, 0xe2, 0xf4, 0xb4, 0xa5, 0xc2, 0x8e, 0xbc, + 0x84, 0x53, 0xdd, 0xa1, 0x12, 0xd1, 0x0e, 0xe6, 0x30, 0x74, 0x09, 0x46, 0x43, 0xc2, 0x56, 0xca, + 0x12, 0x95, 0xe7, 0xd8, 0xd2, 0x1b, 0x48, 0x04, 0x3f, 0xac, 0xc1, 0xb0, 0x81, 0x99, 0xc8, 0xfd, + 0x83, 0x07, 0xc8, 0xfd, 0xcf, 0xc0, 0x10, 0xfb, 0xa1, 0x56, 0x85, 0x9a, 0xa1, 0x55, 0x5e, 0x8c, + 0x25, 0x3c, 0xbd, 0x88, 0x86, 0x0b, 0x2e, 0xa2, 0x67, 0x61, 0xbc, 0xe6, 0x90, 0xad, 0xc0, 0x5f, + 0xf6, 0xdd, 0x76, 0xe0, 0xf9, 0x31, 0x9a, 0x81, 0x0a, 0x3b, 0x4f, 0xf8, 0x7e, 0xaf, 0x50, 0x0a, + 0xb8, 0x42, 0x65, 0x77, 0xfb, 0x4f, 0x4a, 0x30, 0x56, 0x23, 0x2d, 0x12, 0x13, 0xae, 0xf7, 0x44, + 0x68, 0x05, 0xd0, 0x46, 0xe8, 0x34, 0x49, 0x9d, 0x84, 0x5e, 0xe0, 0x36, 0x48, 0x33, 0xf0, 0xd9, + 0xdd, 0x15, 0x3d, 0x20, 0x4f, 0xed, 0xed, 0xce, 0xa1, 0xcb, 0x5d, 0x50, 0x9c, 0x51, 0x03, 0xb9, + 0x30, 0xd6, 0x0e, 0x89, 0x61, 0x37, 0xb4, 0xf2, 0x45, 0x8d, 0xba, 0x5e, 0x85, 0x4b, 0xc3, 0x46, + 0x11, 0x36, 0x89, 0xa2, 0x4f, 0xc1, 0x64, 0x10, 0xb6, 0x37, 0x1d, 0xbf, 0x46, 0xda, 0xc4, 0x77, + 0xa9, 0x0a, 0x20, 0xac, 0x1d, 0xd3, 0x7b, 0xbb, 0x73, 0x93, 0x37, 0x53, 0x30, 0xdc, 0x85, 0x8d, + 0xde, 0x80, 0xa9, 0x76, 0x18, 0xb4, 0x9d, 0x0d, 0xb6, 0x64, 0x84, 0xb4, 0xc2, 0x79, 0xd3, 0xb9, + 0xbd, 0xdd, 0xb9, 0xa9, 0x7a, 0x1a, 0xb8, 0xbf, 0x3b, 0x77, 0x82, 0x0d, 0x19, 0x2d, 0x49, 0x80, + 0xb8, 0x9b, 0x8c, 0xfd, 0x2e, 0x9c, 0xac, 0x05, 0xf7, 0xfd, 0xfb, 0x4e, 0xe8, 0x2e, 0xd4, 0x57, + 
0x35, 0xe3, 0xc4, 0xeb, 0x52, 0xf9, 0xe5, 0x77, 0x82, 0x39, 0x27, 0x9b, 0x46, 0x83, 0xab, 0x1d, + 0x2b, 0x5e, 0x8b, 0xf4, 0x30, 0x87, 0xfc, 0xe3, 0x92, 0xd1, 0x66, 0x82, 0xaf, 0xee, 0x2e, 0xac, + 0x9e, 0x77, 0x17, 0x9f, 0x81, 0xe1, 0x75, 0x8f, 0xb4, 0x5c, 0x4c, 0xd6, 0xc5, 0x6c, 0x5d, 0x28, + 0x72, 0xb9, 0xb3, 0x42, 0xeb, 0x48, 0xeb, 0x18, 0x57, 0xa2, 0x57, 0x04, 0x19, 0xac, 0x08, 0xa2, + 0x0e, 0x4c, 0x4a, 0x3d, 0x4c, 0x42, 0xc5, 0x66, 0x7f, 0xa1, 0x98, 0x9a, 0x67, 0x36, 0xc3, 0xa6, + 0x17, 0xa7, 0x08, 0xe2, 0xae, 0x26, 0xa8, 0xfe, 0xbc, 0x45, 0x8f, 0xba, 0x0a, 0x5b, 0xfa, 0x4c, + 0x7f, 0x66, 0xa6, 0x00, 0x56, 0x6a, 0xff, 0x9a, 0x05, 0x8f, 0x74, 0x8d, 0x96, 0xb0, 0x93, 0x1c, + 0xd9, 0x1c, 0xa5, 0x8d, 0x15, 0xa5, 0x7c, 0x63, 0x85, 0xfd, 0xeb, 0x16, 0x4c, 0x2f, 0x6f, 0xb5, + 0xe3, 0x9d, 0x9a, 0x67, 0xde, 0xb9, 0xbc, 0x0c, 0x83, 0x5b, 0xc4, 0xf5, 0x3a, 0x5b, 0x62, 0x5e, + 0xe7, 0xe4, 0xc1, 0x70, 0x9d, 0x95, 0xee, 0xef, 0xce, 0x8d, 0x35, 0xe2, 0x20, 0x74, 0x36, 0x08, + 0x2f, 0xc0, 0x02, 0x9d, 0x5d, 0x29, 0x79, 0xef, 0x91, 0x6b, 0xde, 0x96, 0x27, 0xaf, 0xf2, 0x0e, + 0x34, 0xf2, 0xcd, 0xcb, 0xa1, 0x9d, 0x7f, 0xad, 0xe3, 0xf8, 0xb1, 0x17, 0xef, 0x98, 0xf2, 0x32, + 0x23, 0x84, 0x13, 0x9a, 0xf6, 0x0f, 0x2c, 0x98, 0x90, 0x1c, 0x68, 0xc1, 0x75, 0x43, 0x12, 0x45, + 0x68, 0x16, 0x4a, 0x5e, 0x5b, 0xf4, 0x14, 0x44, 0xed, 0xd2, 0x6a, 0x1d, 0x97, 0xbc, 0x36, 0x7a, + 0x03, 0xaa, 0xfc, 0x2e, 0x30, 0x59, 0x7e, 0x7d, 0xde, 0x2d, 0x32, 0xed, 0x73, 0x4d, 0xd2, 0xc0, + 0x09, 0x39, 0x29, 0x87, 0xb3, 0xb3, 0xad, 0x6c, 0xde, 0x4c, 0x5d, 0x11, 0xe5, 0x58, 0x61, 0xa0, + 0xb3, 0x30, 0xec, 0x07, 0x2e, 0xbf, 0xae, 0xe5, 0x9c, 0x80, 0x2d, 0xea, 0x1b, 0xa2, 0x0c, 0x2b, + 0xa8, 0xfd, 0x45, 0x0b, 0x46, 0xe5, 0x37, 0x16, 0x54, 0x09, 0xe8, 0x36, 0x4c, 0xd4, 0x81, 0x64, + 0x1b, 0x52, 0x91, 0x9e, 0x41, 0x0c, 0x49, 0xbe, 0xdc, 0x8f, 0x24, 0x6f, 0xff, 0x46, 0x09, 0xc6, + 0x65, 0x77, 0x1a, 0x9d, 0xbb, 0x11, 0xa1, 0x82, 0x4e, 0xd5, 0xe1, 0x83, 0x4f, 0xe4, 0x4a, 0x7e, + 0x3e, 0x4f, 0xdb, 0x33, 0xe6, 0x2c, 0x99, 0xe5, 0x05, 0x49, 0x07, 0x27, 0x24, 0xd1, 0x36, 0x4c, + 0xf9, 0x41, 0xcc, 0x0e, 0x50, 0x05, 0x2f, 0x76, 0x97, 0x92, 0x6e, 0xe7, 0x51, 0xd1, 0xce, 0xd4, + 0x8d, 0x34, 0x3d, 0xdc, 0xdd, 0x04, 0xba, 0x29, 0xad, 0x58, 0x65, 0xd6, 0xd6, 0xb3, 0xc5, 0xda, + 0xea, 0x6d, 0xc4, 0xb2, 0x7f, 0xd7, 0x82, 0xaa, 0x44, 0x3b, 0x8e, 0x4b, 0xb5, 0x3b, 0x30, 0x14, + 0xb1, 0x29, 0x92, 0xc3, 0x75, 0xae, 0xd8, 0x27, 0xf0, 0x79, 0x4d, 0xa4, 0x06, 0xfe, 0x3f, 0xc2, + 0x92, 0x1a, 0x33, 0xe7, 0xab, 0x0f, 0xf9, 0xc0, 0x99, 0xf3, 0x55, 0xcf, 0x7a, 0xdf, 0x9d, 0x8d, + 0x19, 0xf6, 0x06, 0x2a, 0xfa, 0xb6, 0x43, 0xb2, 0xee, 0x3d, 0x48, 0x8b, 0xbe, 0x75, 0x56, 0x8a, + 0x05, 0x14, 0xad, 0xc3, 0x68, 0x53, 0x1a, 0xbc, 0x13, 0x16, 0xf2, 0xd1, 0x82, 0xb7, 0x0b, 0xea, + 0xa2, 0x8a, 0xfb, 0x4b, 0x2d, 0x69, 0x94, 0xb0, 0x41, 0x97, 0xf2, 0xa9, 0xe4, 0x2e, 0xbe, 0x5c, + 0xd0, 0x34, 0x14, 0x92, 0x38, 0x69, 0xa1, 0xe7, 0x35, 0xbc, 0xfd, 0x55, 0x0b, 0x06, 0xb9, 0x85, + 0xb4, 0x98, 0x99, 0x59, 0xbb, 0x82, 0x4b, 0xc6, 0xf3, 0x36, 0x2d, 0x14, 0x37, 0x72, 0xe8, 0x0e, + 0x54, 0xd9, 0x0f, 0x66, 0xed, 0x29, 0x17, 0x71, 0x1e, 0xe3, 0xed, 0xeb, 0x5d, 0xbd, 0x2d, 0x09, + 0xe0, 0x84, 0x96, 0xfd, 0x9d, 0x32, 0x65, 0x7d, 0x09, 0xaa, 0x21, 0x3d, 0x58, 0xc7, 0x21, 0x3d, + 0x94, 0x8e, 0x5e, 0x7a, 0x78, 0x17, 0x26, 0x9a, 0xda, 0x15, 0x60, 0x32, 0xe3, 0x17, 0x0b, 0x2e, + 0x2b, 0xed, 0xde, 0x90, 0x5b, 0x04, 0x97, 0x4c, 0x72, 0x38, 0x4d, 0x1f, 0x11, 0x18, 0xe5, 0xeb, + 0x41, 0xb4, 0x57, 0x61, 0xed, 0x9d, 0x2f, 0xb2, 0xc2, 0xf4, 0xc6, 0xd8, 0x2a, 0x6e, 0x68, 0x84, + 0xb0, 0x41, 0xd6, 0xfe, 
0x95, 0x01, 0x18, 0x58, 0xde, 0x26, 0x7e, 0x7c, 0x0c, 0xac, 0x6e, 0x0b, + 0xc6, 0x3d, 0x7f, 0x3b, 0x68, 0x6d, 0x13, 0x97, 0xc3, 0x0f, 0x77, 0xbc, 0x9f, 0x12, 0x8d, 0x8c, + 0xaf, 0x1a, 0xc4, 0x70, 0x8a, 0xf8, 0x51, 0xd8, 0x22, 0x5e, 0x83, 0x41, 0xbe, 0x32, 0x84, 0x21, + 0x22, 0xe7, 0xc6, 0x80, 0x0d, 0xac, 0xd8, 0x41, 0x89, 0xc5, 0x84, 0x5f, 0x56, 0x08, 0x42, 0xe8, + 0x1d, 0x18, 0x5f, 0xf7, 0xc2, 0x28, 0x5e, 0xf3, 0xb6, 0xa8, 0x0e, 0xb9, 0xd5, 0x3e, 0x84, 0x15, + 0x42, 0x8d, 0xc8, 0x8a, 0x41, 0x09, 0xa7, 0x28, 0xa3, 0x0d, 0x18, 0xa3, 0x4a, 0x70, 0xd2, 0xd4, + 0x50, 0xdf, 0x4d, 0x29, 0x23, 0xe4, 0x35, 0x9d, 0x10, 0x36, 0xe9, 0x52, 0x96, 0xd4, 0x64, 0x4a, + 0xf3, 0x30, 0x93, 0x6e, 0x14, 0x4b, 0xe2, 0xda, 0x32, 0x87, 0x51, 0xce, 0xc6, 0x7c, 0x71, 0xaa, + 0x26, 0x67, 0x4b, 0x3c, 0x6e, 0xec, 0xaf, 0xd3, 0xb3, 0x98, 0x8e, 0xe1, 0x31, 0x1c, 0x5f, 0x57, + 0xcc, 0xe3, 0xeb, 0xc9, 0x02, 0x33, 0xdb, 0xe3, 0xe8, 0x7a, 0x1b, 0x46, 0xb4, 0x89, 0x47, 0xe7, + 0xa1, 0xda, 0x94, 0xee, 0x22, 0x82, 0x8b, 0x2b, 0x51, 0x4a, 0xf9, 0x91, 0xe0, 0x04, 0x87, 0x8e, + 0x0b, 0x15, 0x41, 0xd3, 0xce, 0x65, 0x54, 0x40, 0xc5, 0x0c, 0x62, 0xbf, 0x00, 0xb0, 0xfc, 0x80, + 0x34, 0x17, 0xb8, 0x12, 0xa9, 0xdd, 0x20, 0x5a, 0xbd, 0x6f, 0x10, 0xed, 0xaf, 0x59, 0x30, 0xbe, + 0xb2, 0x64, 0x28, 0x0d, 0xf3, 0x00, 0x5c, 0x36, 0xbe, 0x73, 0xe7, 0x86, 0xb4, 0x90, 0x73, 0x33, + 0xa6, 0x2a, 0xc5, 0x1a, 0x06, 0x7a, 0x14, 0xca, 0xad, 0x8e, 0x2f, 0x44, 0xd6, 0xa1, 0xbd, 0xdd, + 0xb9, 0xf2, 0xb5, 0x8e, 0x8f, 0x69, 0x99, 0xe6, 0xc5, 0x55, 0x2e, 0xec, 0xc5, 0x95, 0xef, 0x02, + 0xfd, 0xe5, 0x32, 0x4c, 0xae, 0xb4, 0xc8, 0x03, 0xa3, 0xd7, 0x4f, 0xc3, 0xa0, 0x1b, 0x7a, 0xdb, + 0x24, 0x4c, 0x0b, 0x02, 0x35, 0x56, 0x8a, 0x05, 0xb4, 0xb0, 0x63, 0xd9, 0x5b, 0xdd, 0x07, 0xf9, + 0xd1, 0x39, 0xd5, 0xe5, 0x7e, 0x33, 0x5a, 0x87, 0x21, 0x7e, 0xe3, 0x1c, 0xcd, 0x0c, 0xb0, 0xa5, + 0xf8, 0xca, 0xc1, 0x9d, 0x49, 0x8f, 0xcf, 0xbc, 0xb0, 0xe0, 0x70, 0x97, 0x1e, 0xc5, 0xcb, 0x44, + 0x29, 0x96, 0xc4, 0x67, 0x3f, 0x01, 0xa3, 0x3a, 0x66, 0x5f, 0xbe, 0x3d, 0x7f, 0xd5, 0x82, 0x13, + 0x2b, 0xad, 0xa0, 0x79, 0x2f, 0xe5, 0xf9, 0xf7, 0x12, 0x8c, 0xd0, 0xcd, 0x14, 0x19, 0x6e, 0xb1, + 0x86, 0xcb, 0xb0, 0x00, 0x61, 0x1d, 0x4f, 0xab, 0x76, 0xeb, 0xd6, 0x6a, 0x2d, 0xcb, 0xd3, 0x58, + 0x80, 0xb0, 0x8e, 0x67, 0xff, 0xbe, 0x05, 0x8f, 0x5f, 0x5e, 0x5a, 0xae, 0x93, 0x30, 0xf2, 0xa2, + 0x98, 0xf8, 0x71, 0x97, 0xb3, 0x33, 0x95, 0x19, 0x5d, 0xad, 0x2b, 0x89, 0xcc, 0x58, 0x63, 0xbd, + 0x10, 0xd0, 0x0f, 0x8a, 0xc7, 0xff, 0x57, 0x2d, 0x38, 0x71, 0xd9, 0x8b, 0x31, 0x69, 0x07, 0x69, + 0x67, 0xe3, 0x90, 0xb4, 0x83, 0xc8, 0x8b, 0x83, 0x70, 0x27, 0xed, 0x6c, 0x8c, 0x15, 0x04, 0x6b, + 0x58, 0xbc, 0xe5, 0x6d, 0x2f, 0xa2, 0x3d, 0x2d, 0x99, 0xaa, 0x2e, 0x16, 0xe5, 0x58, 0x61, 0xd0, + 0x0f, 0x73, 0xbd, 0x90, 0x89, 0x0c, 0x3b, 0x62, 0x07, 0xab, 0x0f, 0xab, 0x49, 0x00, 0x4e, 0x70, + 0xec, 0xbf, 0x6b, 0xc1, 0xc9, 0xcb, 0xad, 0x4e, 0x14, 0x93, 0x70, 0x3d, 0x32, 0x3a, 0xfb, 0x02, + 0x54, 0x89, 0x14, 0xee, 0x45, 0x5f, 0xd5, 0xa1, 0xa1, 0xa4, 0x7e, 0xee, 0xe9, 0xac, 0xf0, 0x0a, + 0x38, 0xd4, 0xf6, 0xe7, 0xfe, 0xf9, 0x5b, 0x25, 0x18, 0xbb, 0xb2, 0xb6, 0x56, 0xbf, 0x4c, 0x62, + 0xc1, 0x25, 0xf3, 0xcd, 0x5e, 0x58, 0xd3, 0xc8, 0x0f, 0x12, 0x7e, 0x3a, 0xb1, 0xd7, 0x9a, 0xe7, + 0xd1, 0x28, 0xf3, 0xab, 0x7e, 0x7c, 0x33, 0x6c, 0xc4, 0xa1, 0xe7, 0x6f, 0x64, 0xea, 0xf0, 0x92, + 0x97, 0x97, 0x7b, 0xf1, 0x72, 0xf4, 0x02, 0x0c, 0xb2, 0x70, 0x18, 0x29, 0x7c, 0x7c, 0x58, 0xc9, + 0x09, 0xac, 0x74, 0x7f, 0x77, 0xae, 0x7a, 0x0b, 0xaf, 0xf2, 0x3f, 0x58, 0xa0, 0xa2, 0xb7, 0x60, + 0x64, 0x33, 0x8e, 0xdb, 0x57, 0x88, 0xe3, 0x92, 
0x50, 0xf2, 0x89, 0xb3, 0x07, 0xf3, 0x09, 0x3a, + 0x1c, 0xbc, 0x42, 0xb2, 0xb5, 0x92, 0xb2, 0x08, 0xeb, 0x14, 0xed, 0x06, 0x40, 0x02, 0x7b, 0x48, + 0x3a, 0x88, 0xfd, 0xf3, 0x25, 0x18, 0xba, 0xe2, 0xf8, 0x6e, 0x8b, 0x84, 0x68, 0x05, 0x2a, 0xe4, + 0x01, 0x69, 0x8a, 0x83, 0x3c, 0xa7, 0xeb, 0xc9, 0x61, 0xc7, 0x2d, 0x77, 0xf4, 0x3f, 0x66, 0xf5, + 0x11, 0x86, 0x21, 0xda, 0xef, 0xcb, 0xca, 0x0f, 0xfd, 0xb9, 0xfc, 0x51, 0x50, 0x8b, 0x82, 0x9f, + 0x94, 0xa2, 0x08, 0x4b, 0x42, 0xcc, 0x02, 0xd5, 0x6c, 0x37, 0x28, 0x7b, 0x8b, 0x8b, 0x69, 0x76, + 0x6b, 0x4b, 0x75, 0x8e, 0x2e, 0xe8, 0x72, 0x0b, 0x94, 0x2c, 0xc4, 0x09, 0x39, 0x7b, 0x0d, 0xaa, + 0x74, 0xf2, 0x17, 0x5a, 0x9e, 0x73, 0xb0, 0x19, 0xec, 0x39, 0xa8, 0x4a, 0x43, 0x54, 0x24, 0x9c, + 0xda, 0x19, 0x55, 0x69, 0xa7, 0x8a, 0x70, 0x02, 0xb7, 0x2f, 0xc1, 0x34, 0xbb, 0x47, 0x76, 0xe2, + 0x4d, 0x63, 0x2f, 0xe6, 0x2e, 0x7a, 0xfb, 0x1b, 0x15, 0x98, 0x5a, 0x6d, 0x2c, 0x35, 0x4c, 0x9b, + 0xe7, 0x25, 0x18, 0xe5, 0xc7, 0x3e, 0x5d, 0xca, 0x4e, 0x4b, 0xd4, 0x57, 0x77, 0x1f, 0x6b, 0x1a, + 0x0c, 0x1b, 0x98, 0xe8, 0x71, 0x28, 0x7b, 0xef, 0xfa, 0x69, 0x6f, 0xc4, 0xd5, 0xd7, 0x6e, 0x60, + 0x5a, 0x4e, 0xc1, 0x54, 0x82, 0xe0, 0xac, 0x53, 0x81, 0x95, 0x14, 0xf1, 0x2a, 0x8c, 0x7b, 0x51, + 0x33, 0xf2, 0x56, 0x7d, 0xca, 0x57, 0x9c, 0xa6, 0xdc, 0x14, 0x89, 0xc8, 0x4f, 0xbb, 0xaa, 0xa0, + 0x38, 0x85, 0xad, 0xf1, 0xf1, 0x81, 0xc2, 0x52, 0x48, 0xae, 0x9b, 0x3b, 0x15, 0xb0, 0xda, 0xec, + 0xeb, 0x22, 0xe6, 0xdb, 0x24, 0x04, 0x2c, 0xfe, 0xc1, 0x11, 0x96, 0x30, 0x74, 0x19, 0xa6, 0x9a, + 0x9b, 0x4e, 0x7b, 0xa1, 0x13, 0x6f, 0xd6, 0xbc, 0xa8, 0x19, 0x6c, 0x93, 0x70, 0x87, 0x09, 0xc0, + 0xc3, 0x89, 0x4d, 0x4b, 0x01, 0x96, 0xae, 0x2c, 0xd4, 0x29, 0x26, 0xee, 0xae, 0x63, 0x0a, 0x24, + 0x70, 0x04, 0x02, 0xc9, 0x02, 0x4c, 0xc8, 0x56, 0x1b, 0x24, 0x62, 0x47, 0xc4, 0x08, 0xeb, 0xa7, + 0x0a, 0x30, 0x12, 0xc5, 0xaa, 0x97, 0x69, 0x7c, 0xfb, 0x1d, 0xa8, 0x2a, 0x5f, 0x3c, 0xe9, 0x82, + 0x6a, 0xf5, 0x70, 0x41, 0xcd, 0x67, 0xee, 0xd2, 0x3a, 0x5f, 0xce, 0xb4, 0xce, 0xff, 0x53, 0x0b, + 0x12, 0x67, 0x22, 0x84, 0xa1, 0xda, 0x0e, 0xd8, 0x4d, 0x5e, 0x28, 0xaf, 0xcc, 0x9f, 0xca, 0xd9, + 0xf3, 0x9c, 0xe7, 0xf0, 0x01, 0xa9, 0xcb, 0xba, 0x38, 0x21, 0x83, 0xae, 0xc1, 0x50, 0x3b, 0x24, + 0x8d, 0x98, 0xc5, 0x8f, 0xf4, 0x41, 0x91, 0x2f, 0x04, 0x5e, 0x13, 0x4b, 0x12, 0xf6, 0xbf, 0xb4, + 0x00, 0xb8, 0x19, 0xdc, 0xf1, 0x37, 0xc8, 0x31, 0x28, 0xd6, 0x37, 0xa0, 0x12, 0xb5, 0x49, 0xb3, + 0xd8, 0x5d, 0x6c, 0xd2, 0xb3, 0x46, 0x9b, 0x34, 0x93, 0xe9, 0xa0, 0xff, 0x30, 0xa3, 0x63, 0x7f, + 0x1b, 0x60, 0x3c, 0x41, 0xa3, 0xca, 0x0d, 0x7a, 0xde, 0x08, 0x9c, 0x78, 0x34, 0x15, 0x38, 0x51, + 0x65, 0xd8, 0x5a, 0xac, 0x44, 0x0c, 0xe5, 0x2d, 0xe7, 0x81, 0xd0, 0xa5, 0x5e, 0x2a, 0xda, 0x21, + 0xda, 0xd2, 0xfc, 0x75, 0xe7, 0x01, 0x17, 0x5d, 0x9f, 0x93, 0x0b, 0xe9, 0xba, 0xf3, 0x60, 0x9f, + 0xdf, 0xb8, 0x32, 0xee, 0x44, 0x95, 0xb7, 0xcf, 0xfd, 0x59, 0xf2, 0x9f, 0x1d, 0x43, 0xb4, 0x39, + 0xd6, 0xaa, 0xe7, 0x0b, 0x53, 0x70, 0x9f, 0xad, 0x7a, 0x7e, 0xba, 0x55, 0xcf, 0x2f, 0xd0, 0xaa, + 0xc7, 0x3c, 0x8c, 0x87, 0xc4, 0x1d, 0x0d, 0x73, 0xcf, 0x1c, 0xb9, 0xf8, 0xf1, 0xbe, 0x9a, 0x16, + 0x97, 0x3d, 0xbc, 0xf9, 0xf3, 0x52, 0x5e, 0x17, 0xa5, 0xb9, 0x5d, 0x90, 0x4d, 0xa3, 0xbf, 0x67, + 0xc1, 0xb8, 0xf8, 0x8d, 0xc9, 0xbb, 0x1d, 0x12, 0xc5, 0x42, 0x2e, 0xf8, 0xd4, 0x61, 0x7a, 0x23, + 0x48, 0xf0, 0x4e, 0x7d, 0x4c, 0xb2, 0x5f, 0x13, 0x98, 0xdb, 0xb7, 0x54, 0x7f, 0xd0, 0xb7, 0x2d, + 0x98, 0xde, 0x72, 0x1e, 0xf0, 0x16, 0x79, 0x19, 0x76, 0x62, 0x2f, 0x10, 0x2e, 0xa8, 0x2b, 0xfd, + 0xae, 0x93, 0x2e, 0x42, 0xbc, 0xbb, 0xd2, 0xbb, 0x6c, 0x3a, 0x0b, 0x25, 
0xb7, 0xd3, 0x99, 0x3d, + 0x9c, 0x5d, 0x87, 0x61, 0xb9, 0x30, 0x33, 0x34, 0xa5, 0x9a, 0x2e, 0xfe, 0xf4, 0x7d, 0x81, 0xa6, + 0x69, 0x56, 0xac, 0x1d, 0xb1, 0x14, 0x8f, 0xb4, 0x9d, 0x77, 0x60, 0x54, 0x5f, 0x77, 0x47, 0xda, + 0xd6, 0xbb, 0x70, 0x22, 0x63, 0x55, 0x1d, 0x69, 0x93, 0xf7, 0xe1, 0xd1, 0x9e, 0xeb, 0xe3, 0x28, + 0x1b, 0xb6, 0x7f, 0xcb, 0xd2, 0x59, 0xe7, 0x31, 0xd8, 0xad, 0xae, 0x9b, 0x76, 0xab, 0xb3, 0x45, + 0xf7, 0x50, 0x0f, 0xe3, 0xd5, 0xba, 0xde, 0x7d, 0x7a, 0x24, 0xa0, 0x35, 0x18, 0x6c, 0xd1, 0x12, + 0x79, 0x6d, 0x78, 0xae, 0x9f, 0x5d, 0x9a, 0x48, 0x60, 0xac, 0x3c, 0xc2, 0x82, 0x96, 0xfd, 0x6d, + 0x0b, 0x2a, 0x7f, 0x89, 0x61, 0x5d, 0x5d, 0xa4, 0x45, 0x6a, 0x82, 0x79, 0xec, 0xdc, 0x5f, 0x7e, + 0x10, 0x13, 0x3f, 0x62, 0x62, 0x7c, 0xe6, 0x10, 0xfd, 0x9f, 0x12, 0x8c, 0xd0, 0xa6, 0xa4, 0xa7, + 0xcc, 0x2b, 0x30, 0xd6, 0x72, 0xee, 0x92, 0x96, 0xb4, 0xb9, 0xa7, 0x95, 0xde, 0x6b, 0x3a, 0x10, + 0x9b, 0xb8, 0xb4, 0xf2, 0xba, 0x7e, 0x25, 0x21, 0x84, 0x24, 0x55, 0xd9, 0xb8, 0xaf, 0xc0, 0x26, + 0x2e, 0xd5, 0xba, 0xee, 0x3b, 0x71, 0x73, 0x53, 0x28, 0xc4, 0xaa, 0xbb, 0x77, 0x68, 0x21, 0xe6, + 0x30, 0x2a, 0xec, 0xc9, 0x15, 0x7b, 0x9b, 0x84, 0x4c, 0xd8, 0xe3, 0x42, 0xb5, 0x12, 0xf6, 0xb0, + 0x09, 0xc6, 0x69, 0x7c, 0xf4, 0x09, 0x18, 0xa7, 0x83, 0x13, 0x74, 0x62, 0xe9, 0x07, 0x34, 0xc0, + 0xfc, 0x80, 0x98, 0x1b, 0xf9, 0x9a, 0x01, 0xc1, 0x29, 0x4c, 0x54, 0x87, 0x69, 0xcf, 0x6f, 0xb6, + 0x3a, 0x2e, 0xb9, 0xe5, 0x7b, 0xbe, 0x17, 0x7b, 0x4e, 0xcb, 0x7b, 0x8f, 0xb8, 0x42, 0xec, 0x56, + 0x2e, 0x5b, 0xab, 0x19, 0x38, 0x38, 0xb3, 0xa6, 0xfd, 0x16, 0x9c, 0xb8, 0x16, 0x38, 0xee, 0xa2, + 0xd3, 0x72, 0xfc, 0x26, 0x09, 0x57, 0xfd, 0x8d, 0x5c, 0x9f, 0x02, 0xfd, 0xde, 0xbf, 0x94, 0x77, + 0xef, 0x6f, 0x87, 0x80, 0xf4, 0x06, 0x84, 0x4f, 0xdc, 0x9b, 0x30, 0xe4, 0xf1, 0xa6, 0xc4, 0x46, + 0xb8, 0x90, 0x27, 0x93, 0x77, 0xf5, 0x51, 0xf3, 0xf1, 0xe2, 0x05, 0x58, 0x92, 0xa4, 0x1a, 0x5c, + 0x96, 0x10, 0x9f, 0xaf, 0x7a, 0xdb, 0x2f, 0xc1, 0x14, 0xab, 0xd9, 0xa7, 0xe2, 0xf7, 0xd7, 0x2c, + 0x98, 0xb8, 0x91, 0x0a, 0x80, 0x7e, 0x1a, 0x06, 0x23, 0x12, 0x66, 0x58, 0x56, 0x1b, 0xac, 0x14, + 0x0b, 0xe8, 0x43, 0xb7, 0xd6, 0xfc, 0x72, 0x09, 0xaa, 0xcc, 0x29, 0xbb, 0x4d, 0x95, 0xb8, 0xa3, + 0x97, 0x97, 0xaf, 0x1b, 0xf2, 0x72, 0x8e, 0xc5, 0x40, 0x75, 0xac, 0x97, 0xb8, 0x8c, 0x6e, 0xa9, + 0xc0, 0xe0, 0x42, 0xc6, 0x82, 0x84, 0x20, 0x0f, 0x1e, 0x1d, 0x37, 0xe3, 0x88, 0x65, 0xd0, 0x30, + 0xbb, 0xc0, 0x57, 0xb8, 0x1f, 0xb8, 0x0b, 0x7c, 0xd5, 0xb3, 0x1e, 0x5c, 0xb2, 0xae, 0x75, 0x9e, + 0x9d, 0x23, 0x3f, 0xc5, 0x5c, 0x6d, 0xd9, 0x1e, 0x56, 0xf1, 0xf5, 0x73, 0xc2, 0x75, 0x56, 0x94, + 0xee, 0x33, 0x86, 0x27, 0xfe, 0xf1, 0xf4, 0x09, 0x49, 0x15, 0xfb, 0x0a, 0x4c, 0xa4, 0x86, 0x0e, + 0xbd, 0x04, 0x03, 0xed, 0x4d, 0x27, 0x22, 0x29, 0xa7, 0xa7, 0x81, 0x3a, 0x2d, 0xdc, 0xdf, 0x9d, + 0x1b, 0x57, 0x15, 0x58, 0x09, 0xe6, 0xd8, 0xf6, 0xe7, 0x4b, 0x50, 0xb9, 0x11, 0xb8, 0xc7, 0xb1, + 0xd4, 0xae, 0x18, 0x4b, 0xed, 0xe9, 0xfc, 0x7c, 0x2d, 0x3d, 0x57, 0x59, 0x3d, 0xb5, 0xca, 0xce, + 0x16, 0xa0, 0x75, 0xf0, 0x02, 0xdb, 0x82, 0x11, 0x96, 0x0f, 0x46, 0x38, 0x65, 0xbd, 0x60, 0xa8, + 0x78, 0x73, 0x29, 0x15, 0x6f, 0x42, 0x43, 0xd5, 0x14, 0xbd, 0x67, 0x60, 0x48, 0x38, 0x01, 0xa5, + 0x1d, 0x8d, 0x05, 0x2e, 0x96, 0x70, 0xfb, 0x5f, 0x94, 0xc1, 0xc8, 0x3f, 0x83, 0x7e, 0xd7, 0x82, + 0xf9, 0x90, 0x07, 0x6d, 0xb9, 0xb5, 0x4e, 0xe8, 0xf9, 0x1b, 0x8d, 0xe6, 0x26, 0x71, 0x3b, 0x2d, + 0xcf, 0xdf, 0x58, 0xdd, 0xf0, 0x03, 0x55, 0xbc, 0xfc, 0x80, 0x34, 0x3b, 0xcc, 0xe6, 0x5e, 0x38, + 0xed, 0x8d, 0xba, 0x00, 0xbf, 0xb8, 0xb7, 0x3b, 0x37, 0x8f, 0xfb, 0x6a, 0x05, 0xf7, 0xd9, 0x2b, + 
0xf4, 0x7d, 0x0b, 0xce, 0xf3, 0x0c, 0x2c, 0xc5, 0xbf, 0xa4, 0x90, 0x6a, 0x5c, 0x97, 0x44, 0x13, + 0x72, 0x6b, 0x24, 0xdc, 0x5a, 0x7c, 0x59, 0x0c, 0xf2, 0xf9, 0x7a, 0x7f, 0xad, 0xe2, 0x7e, 0xbb, + 0x69, 0xff, 0xeb, 0x32, 0x8c, 0xd1, 0xf1, 0x4c, 0x52, 0x28, 0xbc, 0x64, 0x2c, 0x93, 0x27, 0x52, + 0xcb, 0x64, 0xca, 0x40, 0x7e, 0x38, 0xd9, 0x13, 0x22, 0x98, 0x6a, 0x39, 0x51, 0x7c, 0x85, 0x38, + 0x61, 0x7c, 0x97, 0x38, 0xec, 0x9e, 0x39, 0xed, 0xc3, 0x52, 0xe0, 0xea, 0x5a, 0x19, 0xe1, 0xae, + 0xa5, 0x89, 0xe1, 0x6e, 0xfa, 0x68, 0x1b, 0x10, 0xbb, 0xd3, 0x0e, 0x1d, 0x3f, 0xe2, 0xdf, 0xe2, + 0x09, 0x1b, 0x7d, 0x7f, 0xad, 0xce, 0x8a, 0x56, 0xd1, 0xb5, 0x2e, 0x6a, 0x38, 0xa3, 0x05, 0xcd, + 0x6b, 0x61, 0xa0, 0xa8, 0xd7, 0xc2, 0x60, 0x8e, 0x87, 0xff, 0x2f, 0x58, 0x70, 0x82, 0x4e, 0x8b, + 0xe9, 0x0d, 0x1e, 0xa1, 0x00, 0x26, 0xe8, 0xb2, 0x6b, 0x91, 0x58, 0x96, 0x89, 0xfd, 0x95, 0x23, + 0xe2, 0x9b, 0x74, 0x12, 0x39, 0xf2, 0xaa, 0x49, 0x0c, 0xa7, 0xa9, 0xdb, 0x5f, 0xb3, 0x80, 0x79, + 0x4f, 0x1e, 0xc3, 0x61, 0x76, 0xd9, 0x3c, 0xcc, 0xec, 0x7c, 0x8e, 0xd1, 0xe3, 0x1c, 0x7b, 0x11, + 0x26, 0x29, 0xb4, 0x1e, 0x06, 0x0f, 0x76, 0xa4, 0xc4, 0x9f, 0x2f, 0x5d, 0xfd, 0x42, 0x89, 0x6f, + 0x1b, 0x15, 0x7d, 0x8a, 0x7e, 0xd1, 0x82, 0xe1, 0xa6, 0xd3, 0x76, 0x9a, 0x3c, 0x7b, 0x57, 0x01, + 0x33, 0x91, 0x51, 0x7f, 0x7e, 0x49, 0xd4, 0xe5, 0x26, 0x8e, 0x8f, 0xca, 0x4f, 0x97, 0xc5, 0xb9, + 0x66, 0x0d, 0xd5, 0xf8, 0xec, 0x3d, 0x18, 0x33, 0x88, 0x1d, 0xa9, 0x3e, 0xfc, 0x8b, 0x16, 0x67, + 0xfa, 0x4a, 0x67, 0xb9, 0x0f, 0x53, 0xbe, 0xf6, 0x9f, 0xb2, 0x33, 0x29, 0x50, 0xcf, 0x17, 0x67, + 0xeb, 0x8c, 0x0b, 0x6a, 0x9e, 0xa2, 0x29, 0x82, 0xb8, 0xbb, 0x0d, 0xfb, 0x57, 0x2d, 0x78, 0x44, + 0x47, 0xd4, 0xc2, 0x85, 0xf3, 0x0c, 0xd8, 0x35, 0x18, 0x0e, 0xda, 0x24, 0x74, 0x12, 0xfd, 0xec, + 0xac, 0x1c, 0xff, 0x9b, 0xa2, 0x7c, 0x7f, 0x77, 0x6e, 0x5a, 0xa7, 0x2e, 0xcb, 0xb1, 0xaa, 0x89, + 0x6c, 0x18, 0x64, 0xe3, 0x12, 0x89, 0x40, 0x6f, 0x96, 0xcd, 0x8a, 0x5d, 0x90, 0x45, 0x58, 0x40, + 0xec, 0xbf, 0x69, 0xf1, 0xe5, 0xa6, 0x77, 0x1d, 0xfd, 0x0c, 0x4c, 0x6e, 0x51, 0x55, 0x6e, 0xf9, + 0x41, 0x3b, 0xe4, 0xe6, 0x77, 0x39, 0x62, 0x2f, 0x15, 0x1f, 0x31, 0xed, 0x73, 0x17, 0x67, 0x44, + 0xef, 0x27, 0xaf, 0xa7, 0xc8, 0xe2, 0xae, 0x86, 0xec, 0x7f, 0x50, 0xe2, 0x7b, 0x96, 0xc9, 0x70, + 0xcf, 0xc0, 0x50, 0x3b, 0x70, 0x97, 0x56, 0x6b, 0x58, 0x8c, 0x95, 0x62, 0x3a, 0x75, 0x5e, 0x8c, + 0x25, 0x1c, 0x5d, 0x04, 0x20, 0x0f, 0x62, 0x12, 0xfa, 0x4e, 0x4b, 0x5d, 0xe9, 0x2b, 0x51, 0x69, + 0x59, 0x41, 0xb0, 0x86, 0x45, 0xeb, 0xb4, 0xc3, 0x60, 0xdb, 0x73, 0x59, 0x9c, 0x4b, 0xd9, 0xac, + 0x53, 0x57, 0x10, 0xac, 0x61, 0x51, 0x05, 0xba, 0xe3, 0x47, 0xfc, 0x18, 0x73, 0xee, 0x8a, 0x4c, + 0x4a, 0xc3, 0x89, 0x02, 0x7d, 0x4b, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x85, 0xc1, 0xd8, 0x61, 0x17, + 0xd5, 0x03, 0x45, 0xbc, 0x7e, 0xd6, 0x28, 0xae, 0x9e, 0xba, 0x8a, 0x56, 0xc5, 0x82, 0x84, 0xfd, + 0x9f, 0xaa, 0x00, 0x89, 0xd4, 0x85, 0x3e, 0xdf, 0xbd, 0xe1, 0x3f, 0x56, 0x54, 0x64, 0x7b, 0x78, + 0xbb, 0x1d, 0x7d, 0xc9, 0x82, 0x11, 0xa7, 0xd5, 0x0a, 0x9a, 0x4e, 0xcc, 0x86, 0xa7, 0x54, 0x94, + 0xf5, 0x88, 0x9e, 0x2c, 0x24, 0x75, 0x79, 0x67, 0x5e, 0x90, 0x97, 0xc7, 0x1a, 0x24, 0xb7, 0x3f, + 0x7a, 0x17, 0xd0, 0x47, 0xa5, 0xd4, 0xce, 0x67, 0x78, 0x36, 0x2d, 0xb5, 0x57, 0x19, 0xc3, 0xd5, + 0x04, 0x76, 0xf4, 0x96, 0x91, 0x79, 0xa8, 0x52, 0x24, 0x58, 0xd9, 0x90, 0x43, 0xf2, 0x92, 0x0e, + 0xa1, 0x37, 0x74, 0xf7, 0xf8, 0x81, 0x22, 0xd9, 0x00, 0x34, 0x71, 0x38, 0xc7, 0x35, 0x3e, 0x86, + 0x09, 0xd7, 0x3c, 0x79, 0x85, 0x8b, 0xdf, 0x85, 0xfc, 0x16, 0x52, 0x47, 0x76, 0x72, 0xd6, 0xa6, + 0x00, 0x38, 0xdd, 0x04, 
0x7a, 0x83, 0x07, 0x2f, 0xac, 0xfa, 0xeb, 0x81, 0x70, 0xf3, 0x3b, 0x57, + 0x60, 0xce, 0x77, 0xa2, 0x98, 0x6c, 0xd1, 0x3a, 0xc9, 0xe1, 0x7a, 0x43, 0x50, 0xc1, 0x8a, 0x1e, + 0x5a, 0x83, 0x41, 0x16, 0x9b, 0x16, 0xcd, 0x0c, 0x17, 0x31, 0x09, 0x9a, 0x21, 0xd9, 0xc9, 0xfe, + 0x61, 0x7f, 0x23, 0x2c, 0x68, 0xa1, 0x2b, 0x32, 0x29, 0x43, 0xb4, 0xea, 0xdf, 0x8a, 0x08, 0x4b, + 0xca, 0x50, 0x5d, 0xfc, 0x48, 0x92, 0x65, 0x81, 0x97, 0x67, 0xa6, 0x6b, 0x34, 0x6a, 0x52, 0xc1, + 0x46, 0xfc, 0x97, 0x59, 0x20, 0x67, 0xa0, 0x48, 0x47, 0xcd, 0x9c, 0x91, 0xc9, 0x60, 0xdf, 0x36, + 0x89, 0xe1, 0x34, 0xf5, 0x63, 0x3d, 0x52, 0x67, 0x7d, 0x98, 0x4c, 0x6f, 0xca, 0x23, 0x3d, 0xc2, + 0x7f, 0x58, 0x81, 0x71, 0x73, 0x71, 0xa0, 0xf3, 0x50, 0x15, 0x44, 0x54, 0x8a, 0x37, 0xb5, 0x07, + 0xae, 0x4b, 0x00, 0x4e, 0x70, 0x58, 0xb2, 0x3b, 0x56, 0x5d, 0x73, 0xf0, 0x4a, 0x92, 0xdd, 0x29, + 0x08, 0xd6, 0xb0, 0xa8, 0x24, 0x7c, 0x37, 0x08, 0x62, 0x75, 0x12, 0xa8, 0x75, 0xb3, 0xc8, 0x4a, + 0xb1, 0x80, 0xd2, 0x13, 0xe0, 0x1e, 0x9d, 0xcc, 0x96, 0x69, 0xde, 0x54, 0x27, 0xc0, 0x55, 0x1d, + 0x88, 0x4d, 0x5c, 0x7a, 0xa2, 0x05, 0x11, 0x5b, 0x88, 0x42, 0xde, 0x4e, 0x1c, 0xe6, 0x1a, 0x3c, + 0x5e, 0x53, 0xc2, 0xd1, 0xa7, 0xe1, 0x11, 0x15, 0x5e, 0x89, 0xb9, 0xb9, 0x58, 0xb6, 0x38, 0x68, + 0xa8, 0xcc, 0x8f, 0x2c, 0x65, 0xa3, 0xe1, 0x5e, 0xf5, 0xd1, 0xab, 0x30, 0x2e, 0x64, 0x65, 0x49, + 0x71, 0xc8, 0xf4, 0x7b, 0xb8, 0x6a, 0x40, 0x71, 0x0a, 0x1b, 0xd5, 0x60, 0x92, 0x96, 0x30, 0x21, + 0x55, 0x52, 0xe0, 0x61, 0xa2, 0xea, 0xa8, 0xbf, 0x9a, 0x82, 0xe3, 0xae, 0x1a, 0x68, 0x01, 0x26, + 0xb8, 0xb0, 0x42, 0x15, 0x43, 0x36, 0x0f, 0xc2, 0x37, 0x57, 0x6d, 0x84, 0x9b, 0x26, 0x18, 0xa7, + 0xf1, 0xd1, 0x25, 0x18, 0x75, 0xc2, 0xe6, 0xa6, 0x17, 0x93, 0x66, 0xdc, 0x09, 0x79, 0xca, 0x13, + 0xcd, 0x71, 0x64, 0x41, 0x83, 0x61, 0x03, 0xd3, 0x7e, 0x0f, 0x4e, 0x64, 0x04, 0x02, 0xd0, 0x85, + 0xe3, 0xb4, 0x3d, 0xf9, 0x4d, 0x29, 0xd7, 0xb7, 0x85, 0xfa, 0xaa, 0xfc, 0x1a, 0x0d, 0x8b, 0xae, + 0x4e, 0x66, 0x27, 0xd7, 0x92, 0xb6, 0xaa, 0xd5, 0xb9, 0x22, 0x01, 0x38, 0xc1, 0xb1, 0xff, 0x14, + 0x40, 0xb3, 0xde, 0x14, 0x70, 0x77, 0xba, 0x04, 0xa3, 0x32, 0x0f, 0xb1, 0x96, 0xcc, 0x53, 0x7d, + 0xe6, 0x65, 0x0d, 0x86, 0x0d, 0x4c, 0xda, 0x37, 0x5f, 0xda, 0xa4, 0xd2, 0x8e, 0x76, 0xca, 0x58, + 0x85, 0x13, 0x1c, 0x74, 0x0e, 0x86, 0x23, 0xd2, 0x5a, 0xbf, 0xe6, 0xf9, 0xf7, 0xc4, 0xc2, 0x56, + 0x9c, 0xb9, 0x21, 0xca, 0xb1, 0xc2, 0x40, 0x8b, 0x50, 0xee, 0x78, 0xae, 0x58, 0xca, 0x52, 0x6c, + 0x28, 0xdf, 0x5a, 0xad, 0xed, 0xef, 0xce, 0x3d, 0xd1, 0x2b, 0xbd, 0x32, 0xd5, 0xcf, 0xa3, 0x79, + 0xba, 0xfd, 0x68, 0xe5, 0xac, 0x0b, 0x83, 0xc1, 0x3e, 0x2f, 0x0c, 0x2e, 0x02, 0x88, 0xaf, 0x96, + 0x6b, 0xb9, 0x9c, 0xcc, 0xda, 0x65, 0x05, 0xc1, 0x1a, 0x16, 0xd5, 0xf2, 0x9b, 0x21, 0x71, 0xa4, + 0x22, 0xcc, 0x1d, 0xd4, 0x87, 0x0f, 0xaf, 0xe5, 0x2f, 0xa5, 0x89, 0xe1, 0x6e, 0xfa, 0x28, 0x80, + 0x29, 0x57, 0xc4, 0xf0, 0x26, 0x8d, 0x56, 0xfb, 0xf7, 0x8a, 0x67, 0xbe, 0x3d, 0x69, 0x42, 0xb8, + 0x9b, 0x36, 0xfa, 0x2c, 0xcc, 0xca, 0xc2, 0xee, 0x00, 0x6a, 0xb6, 0x5d, 0xca, 0x8b, 0xa7, 0xf7, + 0x76, 0xe7, 0x66, 0x6b, 0x3d, 0xb1, 0xf0, 0x01, 0x14, 0xd0, 0x9b, 0x30, 0xc8, 0x2e, 0x98, 0xa2, + 0x99, 0x11, 0x76, 0xe2, 0xbd, 0x58, 0x24, 0xb6, 0x82, 0xae, 0xfa, 0x79, 0x76, 0x4d, 0x25, 0xbc, + 0x86, 0x93, 0x5b, 0x3b, 0x56, 0x88, 0x05, 0x4d, 0xd4, 0x86, 0x11, 0xc7, 0xf7, 0x83, 0xd8, 0xe1, + 0x82, 0xd8, 0x68, 0x11, 0x59, 0x52, 0x6b, 0x62, 0x21, 0xa9, 0xcb, 0xdb, 0x51, 0x8e, 0x88, 0x1a, + 0x04, 0xeb, 0x4d, 0xa0, 0xfb, 0x30, 0x11, 0xdc, 0xa7, 0x0c, 0x53, 0xde, 0x88, 0x44, 0x33, 0x63, + 0xe6, 0x87, 0xe5, 0x18, 0x6a, 0x8d, 0xca, 0x1a, 
0x27, 0x33, 0x89, 0xe2, 0x74, 0x2b, 0x68, 0xde, + 0x30, 0x57, 0x8f, 0x27, 0xbe, 0xf1, 0x89, 0xb9, 0x5a, 0xb7, 0x4e, 0xb3, 0x20, 0x7d, 0xee, 0x0f, + 0xcb, 0x38, 0xc2, 0x44, 0x2a, 0x48, 0x3f, 0x01, 0x61, 0x1d, 0x0f, 0x6d, 0xc2, 0x68, 0x72, 0xb7, + 0x15, 0x46, 0x2c, 0xff, 0x8f, 0xe6, 0xee, 0x75, 0xf0, 0xc7, 0xad, 0x6a, 0x35, 0x79, 0xa4, 0x8f, + 0x5e, 0x82, 0x0d, 0xca, 0xb3, 0x1f, 0x87, 0x11, 0x6d, 0x8a, 0xfb, 0x71, 0xf7, 0x9e, 0x7d, 0x15, + 0x26, 0xd3, 0x53, 0xd7, 0x97, 0xbb, 0xf8, 0xff, 0x28, 0xc1, 0x44, 0xc6, 0xc5, 0x16, 0xcb, 0xc6, + 0x9c, 0x62, 0xb2, 0x49, 0xf2, 0x65, 0x93, 0x55, 0x96, 0x0a, 0xb0, 0x4a, 0xc9, 0xb7, 0xcb, 0x3d, + 0xf9, 0xb6, 0x60, 0x8f, 0x95, 0xf7, 0xc3, 0x1e, 0xcd, 0x13, 0x69, 0xa0, 0xd0, 0x89, 0xf4, 0x10, + 0x58, 0xaa, 0x71, 0xa8, 0x0d, 0x15, 0x38, 0xd4, 0xbe, 0x5a, 0x82, 0xc9, 0xc4, 0x35, 0x5e, 0xa4, + 0x41, 0x3f, 0xfa, 0x0b, 0x8f, 0x35, 0xe3, 0xc2, 0x23, 0x2f, 0xcb, 0x79, 0xaa, 0x7f, 0x3d, 0x2f, + 0x3f, 0xde, 0x4c, 0x5d, 0x7e, 0xbc, 0xd8, 0x27, 0xdd, 0x83, 0x2f, 0x42, 0xbe, 0x55, 0x82, 0x93, + 0xe9, 0x2a, 0x4b, 0x2d, 0xc7, 0xdb, 0x3a, 0x86, 0xf1, 0xfa, 0xb4, 0x31, 0x5e, 0x2f, 0xf7, 0xf7, + 0x5d, 0xac, 0x93, 0x3d, 0x07, 0xcd, 0x49, 0x0d, 0xda, 0xc7, 0x0f, 0x43, 0xfc, 0xe0, 0x91, 0xfb, + 0x43, 0x0b, 0x1e, 0xcd, 0xac, 0x77, 0x0c, 0x26, 0xde, 0xd7, 0x4d, 0x13, 0xef, 0x0b, 0x87, 0xf8, + 0xba, 0x1e, 0x36, 0xdf, 0x5f, 0x2b, 0xf7, 0xf8, 0x2a, 0x66, 0x04, 0xbb, 0x09, 0x23, 0x4e, 0xb3, + 0x49, 0xa2, 0xe8, 0x7a, 0xe0, 0xaa, 0xc4, 0x62, 0xcf, 0xb3, 0x53, 0x2c, 0x29, 0xde, 0xdf, 0x9d, + 0x9b, 0x4d, 0x93, 0x48, 0xc0, 0x58, 0xa7, 0x60, 0xa6, 0x3c, 0x2c, 0x1d, 0x51, 0xca, 0xc3, 0x8b, + 0x00, 0xdb, 0x4a, 0x5f, 0x4e, 0xdb, 0xd6, 0x34, 0x4d, 0x5a, 0xc3, 0x42, 0x7f, 0x85, 0xc9, 0x9e, + 0xdc, 0x2f, 0xa5, 0x62, 0x46, 0xd9, 0xe6, 0xcc, 0x9f, 0xee, 0xe3, 0xc2, 0x83, 0x79, 0x95, 0x1d, + 0x52, 0x91, 0x44, 0x9f, 0x82, 0xc9, 0x88, 0xe7, 0xa4, 0x58, 0x6a, 0x39, 0x11, 0x8b, 0x09, 0x11, + 0xfc, 0x94, 0xc5, 0xe5, 0x36, 0x52, 0x30, 0xdc, 0x85, 0x6d, 0x7f, 0xb3, 0x0c, 0x1f, 0x3e, 0x60, + 0xd9, 0xa2, 0x05, 0xf3, 0x7e, 0xf8, 0xb9, 0xb4, 0xa5, 0x69, 0x36, 0xb3, 0xb2, 0x61, 0x7a, 0x4a, + 0xcd, 0x76, 0xe9, 0x7d, 0xcf, 0xf6, 0x97, 0x75, 0xbb, 0x20, 0x77, 0x55, 0xbd, 0x7c, 0xe8, 0x8d, + 0xf9, 0xa3, 0x7a, 0x2d, 0xf0, 0x39, 0x0b, 0x9e, 0xc8, 0xfc, 0x2c, 0xc3, 0x1f, 0xe5, 0x3c, 0x54, + 0x9b, 0xb4, 0x50, 0x8b, 0xe0, 0x4a, 0x42, 0x27, 0x25, 0x00, 0x27, 0x38, 0x86, 0xdb, 0x49, 0x29, + 0xd7, 0xed, 0xe4, 0xf7, 0x2c, 0x98, 0x4e, 0x77, 0xe2, 0x18, 0xf8, 0x56, 0xc3, 0xe4, 0x5b, 0xf3, + 0xfd, 0x4d, 0x7e, 0x0f, 0x96, 0xf5, 0xd5, 0x49, 0x38, 0xd5, 0x75, 0xea, 0xf1, 0x51, 0xfc, 0x39, + 0x0b, 0xa6, 0x36, 0x98, 0x9e, 0xa0, 0x85, 0xc9, 0x89, 0xef, 0xca, 0x89, 0x2d, 0x3c, 0x30, 0xba, + 0x8e, 0x6b, 0x3d, 0x5d, 0x28, 0xb8, 0xbb, 0x31, 0xf4, 0x45, 0x0b, 0xa6, 0x9d, 0xfb, 0x51, 0xd7, + 0x23, 0x3d, 0x62, 0x21, 0xbd, 0x9a, 0x63, 0x96, 0xcb, 0x79, 0xde, 0x67, 0x71, 0x66, 0x6f, 0x77, + 0x6e, 0x3a, 0x0b, 0x0b, 0x67, 0xb6, 0x4a, 0xe7, 0x77, 0x53, 0x84, 0xcb, 0x14, 0x0b, 0xf8, 0xcc, + 0x0a, 0xae, 0xe1, 0x6c, 0x4d, 0x42, 0xb0, 0xa2, 0x88, 0xde, 0x86, 0xea, 0x86, 0x8c, 0x8c, 0x4b, + 0xb3, 0xcd, 0x1e, 0xc3, 0x9c, 0x15, 0x48, 0xc7, 0xc3, 0x15, 0x14, 0x08, 0x27, 0x44, 0xd1, 0x15, + 0x28, 0xfb, 0xeb, 0x91, 0x88, 0x41, 0xcf, 0xf3, 0x36, 0x32, 0x7d, 0xbc, 0x78, 0xd8, 0xee, 0x8d, + 0x95, 0x06, 0xa6, 0x24, 0x28, 0xa5, 0xf0, 0xae, 0x2b, 0xec, 0xd1, 0x39, 0x94, 0xf0, 0x62, 0xad, + 0x9b, 0x12, 0x5e, 0xac, 0x61, 0x4a, 0x02, 0xd5, 0x61, 0x80, 0x05, 0xe3, 0x08, 0x63, 0x73, 0x4e, + 0xa2, 0x82, 0xae, 0x90, 0x23, 0x9e, 0x99, 0x93, 0x15, 0x63, 0x4e, 0x08, 
0xad, 0xc1, 0x60, 0x93, + 0x3d, 0x2e, 0x21, 0xac, 0x00, 0x79, 0x29, 0x3c, 0xba, 0x1e, 0xa2, 0xe0, 0x37, 0x6c, 0xbc, 0x1c, + 0x0b, 0x5a, 0x8c, 0x2a, 0x69, 0x6f, 0xae, 0x47, 0x42, 0xcd, 0xcf, 0xa3, 0xda, 0xf5, 0x4c, 0x88, + 0xa0, 0xca, 0xca, 0xb1, 0xa0, 0x85, 0x6a, 0x50, 0x5a, 0x6f, 0x8a, 0x58, 0x9d, 0x1c, 0x23, 0xb3, + 0x19, 0x83, 0xbd, 0x38, 0xb8, 0xb7, 0x3b, 0x57, 0x5a, 0x59, 0xc2, 0xa5, 0xf5, 0x26, 0x7a, 0x1d, + 0x86, 0xd6, 0x79, 0x54, 0xad, 0x48, 0xe6, 0x7b, 0x21, 0x2f, 0xf4, 0xb7, 0x2b, 0x04, 0x97, 0x87, + 0xa4, 0x08, 0x00, 0x96, 0xe4, 0x58, 0x9e, 0x43, 0x15, 0x27, 0x2c, 0xb2, 0xf9, 0xce, 0xf7, 0x17, + 0x57, 0x2c, 0xb4, 0x5f, 0x55, 0x8a, 0x35, 0x8a, 0x74, 0xcd, 0x3b, 0xf2, 0x9d, 0x1c, 0x96, 0xc9, + 0x37, 0x77, 0xcd, 0x67, 0x3e, 0xab, 0xc3, 0xd7, 0xbc, 0x02, 0xe1, 0x84, 0x28, 0xea, 0xc0, 0xd8, + 0x76, 0xd4, 0xde, 0x24, 0x72, 0xeb, 0xb3, 0xf4, 0xbe, 0x23, 0x17, 0x3f, 0x99, 0x93, 0xb3, 0x59, + 0x54, 0xf1, 0xc2, 0xb8, 0xe3, 0xb4, 0xba, 0x38, 0x18, 0x4b, 0x2c, 0x77, 0x5b, 0x27, 0x8b, 0xcd, + 0x56, 0xe8, 0x94, 0xbc, 0xdb, 0x09, 0xee, 0xee, 0xc4, 0x44, 0xa4, 0xff, 0xcd, 0x99, 0x92, 0xd7, + 0x38, 0x72, 0xf7, 0x94, 0x08, 0x00, 0x96, 0xe4, 0xd4, 0x90, 0x31, 0x6e, 0x3c, 0x59, 0x78, 0xc8, + 0xba, 0xbe, 0x21, 0x19, 0x32, 0xc6, 0x7d, 0x13, 0xa2, 0x8c, 0xeb, 0xb6, 0x37, 0x83, 0x38, 0xf0, + 0x53, 0xbc, 0x7f, 0xaa, 0x08, 0xd7, 0xad, 0x67, 0xd4, 0xec, 0xe6, 0xba, 0x59, 0x58, 0x38, 0xb3, + 0x55, 0xe4, 0xc3, 0x78, 0x3b, 0x08, 0xe3, 0xfb, 0x41, 0x28, 0xd7, 0x21, 0x2a, 0xa4, 0x23, 0x1a, + 0x75, 0x44, 0xdb, 0xcc, 0xf3, 0xd8, 0x84, 0xe0, 0x14, 0x75, 0x3a, 0x75, 0x51, 0xd3, 0x69, 0x91, + 0xd5, 0x9b, 0x33, 0x27, 0x8a, 0x4c, 0x5d, 0x83, 0x23, 0x77, 0x4f, 0x9d, 0x00, 0x60, 0x49, 0x8e, + 0xf2, 0x3a, 0x96, 0xcb, 0x9e, 0x65, 0x33, 0xce, 0xe5, 0x75, 0x5d, 0xde, 0xb9, 0x9c, 0xd7, 0xb1, + 0x62, 0xcc, 0x09, 0xa1, 0x77, 0xa0, 0x2a, 0x84, 0xdb, 0x20, 0x9a, 0x39, 0xc9, 0xa8, 0xfe, 0x64, + 0x4e, 0x6f, 0x39, 0xfa, 0xcd, 0x46, 0xf6, 0xa9, 0x2f, 0xa2, 0xff, 0x24, 0x12, 0x4e, 0xc8, 0xdb, + 0xbf, 0x3a, 0xd8, 0x2d, 0xf6, 0x30, 0xc5, 0xe6, 0x6f, 0x74, 0xdf, 0x58, 0x7f, 0xaa, 0x7f, 0xfd, + 0xfd, 0x21, 0xde, 0x5d, 0x7f, 0xd1, 0x82, 0x53, 0xed, 0xcc, 0xcf, 0x13, 0x82, 0x43, 0xbf, 0x66, + 0x00, 0x3e, 0x34, 0x2a, 0xc7, 0x78, 0x36, 0x1c, 0xf7, 0x68, 0x33, 0xad, 0x0a, 0x94, 0xdf, 0xb7, + 0x2a, 0x70, 0x07, 0x86, 0x99, 0xec, 0x9a, 0xe4, 0xf7, 0xe9, 0x33, 0x15, 0x0e, 0x13, 0x41, 0x96, + 0x04, 0x09, 0xac, 0x88, 0xd1, 0x81, 0x7b, 0x3c, 0xfd, 0x11, 0x98, 0x30, 0xb0, 0xc8, 0x6c, 0xc9, + 0xf5, 0xac, 0x15, 0x31, 0x12, 0x8f, 0xd7, 0x0f, 0x42, 0xde, 0xcf, 0x43, 0xc0, 0x07, 0x37, 0x86, + 0x6a, 0x19, 0x8a, 0xde, 0xa0, 0x79, 0x3d, 0x95, 0xaf, 0xec, 0x1d, 0xaf, 0x82, 0xf2, 0x0f, 0xad, + 0x0c, 0x79, 0x9a, 0x2b, 0x95, 0x9f, 0x34, 0x95, 0xca, 0xa7, 0xd3, 0x4a, 0x65, 0x97, 0x29, 0xc9, + 0xd0, 0x27, 0x8b, 0x67, 0xe6, 0x2d, 0x9a, 0xc0, 0xc8, 0x6e, 0xc1, 0x99, 0x3c, 0x66, 0xcd, 0x5c, + 0xd6, 0x5c, 0x75, 0x59, 0x9b, 0xb8, 0xac, 0xb9, 0xab, 0x35, 0xcc, 0x20, 0x45, 0x73, 0x60, 0xd8, + 0x3f, 0x5f, 0x82, 0x72, 0x3d, 0x70, 0x8f, 0xc1, 0x34, 0x76, 0xd9, 0x30, 0x8d, 0x3d, 0x95, 0xfb, + 0x50, 0x64, 0x4f, 0x43, 0xd8, 0xcd, 0x94, 0x21, 0xec, 0x27, 0xf2, 0x49, 0x1d, 0x6c, 0xf6, 0xfa, + 0x76, 0x19, 0xf4, 0xa7, 0x2e, 0xd1, 0x7f, 0x38, 0x8c, 0x27, 0x73, 0xb9, 0xd8, 0xeb, 0x97, 0xa2, + 0x0d, 0xe6, 0xf1, 0x26, 0x03, 0x31, 0x7f, 0x64, 0x1d, 0x9a, 0xef, 0x10, 0x6f, 0x63, 0x33, 0x26, + 0x6e, 0xfa, 0xc3, 0x8e, 0xcf, 0xa1, 0xf9, 0x2f, 0x2c, 0x98, 0x48, 0xb5, 0x8e, 0x5a, 0x59, 0x11, + 0x5c, 0x87, 0x34, 0x76, 0x4d, 0xe5, 0x86, 0x7c, 0xcd, 0x03, 0xa8, 0x3b, 0x0b, 0x69, 0x50, 0x62, + 
0xb2, 0xb5, 0xba, 0xd4, 0x88, 0xb0, 0x86, 0x81, 0x5e, 0x82, 0x91, 0x38, 0x68, 0x07, 0xad, 0x60, + 0x63, 0xe7, 0x2a, 0x91, 0xd9, 0x59, 0xd4, 0xcd, 0xd2, 0x5a, 0x02, 0xc2, 0x3a, 0x9e, 0xfd, 0x9d, + 0x32, 0xa4, 0x1f, 0x4a, 0xfd, 0xff, 0xeb, 0xf4, 0x47, 0x67, 0x9d, 0xfe, 0xb1, 0x05, 0x93, 0xb4, + 0x75, 0xe6, 0x62, 0x24, 0x1d, 0x8f, 0xd5, 0x33, 0x21, 0xd6, 0x01, 0xcf, 0x84, 0x3c, 0x4d, 0xb9, + 0x9d, 0x1b, 0x74, 0x62, 0x61, 0x02, 0xd3, 0x98, 0x18, 0x2d, 0xc5, 0x02, 0x2a, 0xf0, 0x48, 0x18, + 0x8a, 0x08, 0x2d, 0x1d, 0x8f, 0x84, 0x21, 0x16, 0x50, 0xf9, 0x8a, 0x48, 0xa5, 0xc7, 0x2b, 0x22, + 0x2c, 0xbf, 0x99, 0x70, 0x6b, 0x11, 0x62, 0x85, 0x96, 0xdf, 0x4c, 0xfa, 0xbb, 0x24, 0x38, 0xf6, + 0xd7, 0xcb, 0x30, 0x5a, 0x0f, 0xdc, 0x24, 0xa2, 0xe0, 0x45, 0x23, 0xa2, 0xe0, 0x4c, 0x2a, 0xa2, + 0x60, 0x52, 0xc7, 0x7d, 0x38, 0x01, 0x05, 0x22, 0x0f, 0x1e, 0x7b, 0xe7, 0xe6, 0x90, 0xc1, 0x04, + 0x46, 0x1e, 0x3c, 0x45, 0x08, 0x9b, 0x74, 0x7f, 0x9c, 0x82, 0x08, 0xfe, 0xb7, 0x05, 0xe3, 0xf5, + 0xc0, 0xa5, 0x0b, 0xf4, 0xc7, 0x69, 0x35, 0xea, 0xd9, 0xf3, 0x06, 0x0f, 0xc8, 0x9e, 0xf7, 0xcf, + 0x2d, 0x18, 0xaa, 0x07, 0xee, 0x31, 0x98, 0x87, 0x57, 0x4c, 0xf3, 0xf0, 0x13, 0xb9, 0x9c, 0xb7, + 0x87, 0x45, 0xf8, 0x9b, 0x65, 0x18, 0xa3, 0x3d, 0x0e, 0x36, 0xe4, 0x7c, 0x19, 0x63, 0x63, 0x15, + 0x18, 0x1b, 0x2a, 0x12, 0x06, 0xad, 0x56, 0x70, 0x3f, 0x3d, 0x77, 0x2b, 0xac, 0x14, 0x0b, 0x28, + 0x3a, 0x07, 0xc3, 0xed, 0x90, 0x6c, 0x7b, 0x41, 0x27, 0x4a, 0x47, 0x7b, 0xd6, 0x45, 0x39, 0x56, + 0x18, 0xe8, 0x45, 0x18, 0x8d, 0x3c, 0xbf, 0x49, 0xa4, 0xd3, 0x4b, 0x85, 0x39, 0xbd, 0xf0, 0x44, + 0xa5, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x1d, 0xa8, 0xb2, 0xff, 0x6c, 0x07, 0xf5, 0xff, 0x0c, 0x08, + 0x57, 0x87, 0x25, 0x01, 0x9c, 0xd0, 0x42, 0x17, 0x01, 0x62, 0xe9, 0x9e, 0x13, 0x89, 0xb0, 0x64, + 0x25, 0x97, 0x2a, 0xc7, 0x9d, 0x08, 0x6b, 0x58, 0xe8, 0x39, 0xa8, 0xc6, 0x8e, 0xd7, 0xba, 0xe6, + 0xf9, 0x24, 0x12, 0xee, 0x4d, 0x22, 0xe9, 0xb8, 0x28, 0xc4, 0x09, 0x9c, 0x9e, 0xf7, 0x2c, 0xe8, + 0x9d, 0x3f, 0x31, 0x34, 0xcc, 0xb0, 0xd9, 0x79, 0x7f, 0x4d, 0x95, 0x62, 0x0d, 0xc3, 0xbe, 0x04, + 0x27, 0xeb, 0x81, 0x5b, 0x0f, 0xc2, 0x78, 0x25, 0x08, 0xef, 0x3b, 0xa1, 0x2b, 0xe7, 0x6f, 0x4e, + 0xe6, 0xba, 0xa6, 0x67, 0xf2, 0x00, 0xb7, 0x22, 0x18, 0xb9, 0xab, 0x5f, 0x60, 0x27, 0x7e, 0x9f, + 0xa1, 0x2a, 0x7f, 0x54, 0x06, 0x54, 0x67, 0x0e, 0x44, 0xc6, 0x8b, 0x54, 0x9b, 0x30, 0x1e, 0x91, + 0x6b, 0x9e, 0xdf, 0x79, 0x20, 0x48, 0x15, 0x8b, 0x0d, 0x6a, 0x2c, 0xeb, 0x75, 0xb8, 0x9d, 0xc6, + 0x2c, 0xc3, 0x29, 0xba, 0x74, 0x66, 0xc3, 0x8e, 0xbf, 0x10, 0xdd, 0x8a, 0x48, 0x28, 0x5e, 0x60, + 0xfa, 0x38, 0xbb, 0xc6, 0x94, 0x85, 0xfb, 0xbb, 0x73, 0x67, 0x73, 0x9c, 0x33, 0x7c, 0xef, 0x01, + 0xc5, 0x5c, 0xad, 0xe1, 0x84, 0x16, 0x5d, 0x68, 0xec, 0xcf, 0x8d, 0xc0, 0xc7, 0x41, 0x10, 0xcb, + 0xa5, 0xc9, 0x5e, 0xef, 0xd0, 0xca, 0xb1, 0x81, 0x85, 0x22, 0x40, 0x51, 0xa7, 0xdd, 0x6e, 0xb1, + 0x5b, 0x55, 0xa7, 0x75, 0x39, 0x0c, 0x3a, 0x6d, 0xee, 0x73, 0x5e, 0x5e, 0x5c, 0xa2, 0x3c, 0xb8, + 0xd1, 0x05, 0xdd, 0xdf, 0x9d, 0x7b, 0x26, 0xbf, 0x83, 0x0c, 0x77, 0xb5, 0x86, 0x33, 0xc8, 0x23, + 0x0c, 0x43, 0xeb, 0x11, 0xfb, 0x2d, 0x42, 0xeb, 0x2f, 0x31, 0x33, 0x6e, 0x83, 0x15, 0xf5, 0x47, + 0x5e, 0x12, 0xb2, 0x7f, 0x96, 0x1d, 0xb3, 0xec, 0x81, 0x9e, 0xb8, 0x13, 0x12, 0xb4, 0x05, 0x63, + 0x6d, 0x76, 0x94, 0xc6, 0x61, 0xd0, 0x6a, 0x11, 0x29, 0xe5, 0x1e, 0xce, 0x91, 0x8a, 0x3f, 0xc9, + 0xa1, 0x93, 0xc3, 0x26, 0x75, 0xfb, 0xbf, 0x8e, 0x33, 0x8e, 0x29, 0xae, 0xcc, 0x87, 0x84, 0xcb, + 0xb4, 0x90, 0x27, 0x3f, 0x52, 0xe4, 0xa9, 0xbd, 0xe4, 0x34, 0x12, 0x0e, 0xd8, 0x58, 0x52, 0x41, + 0x9f, 0x61, 0x01, 0x01, 
0x9c, 0x4d, 0x15, 0x7f, 0x40, 0x94, 0xe3, 0x1b, 0xc1, 0x00, 0x82, 0x04, + 0xd6, 0xc8, 0xa1, 0x6b, 0x30, 0x26, 0xde, 0x73, 0x11, 0xc6, 0x92, 0xb2, 0xa1, 0xe8, 0x8f, 0x61, + 0x1d, 0xb8, 0x9f, 0x2e, 0xc0, 0x66, 0x65, 0xb4, 0x01, 0x8f, 0x6b, 0xef, 0x95, 0x65, 0x38, 0xfd, + 0x71, 0xfe, 0xf7, 0xc4, 0xde, 0xee, 0xdc, 0xe3, 0x6b, 0x07, 0x21, 0xe2, 0x83, 0xe9, 0xa0, 0x9b, + 0x70, 0xd2, 0x69, 0xc6, 0xde, 0x36, 0xa9, 0x11, 0xc7, 0x6d, 0x79, 0x3e, 0x31, 0x93, 0x35, 0x3c, + 0xba, 0xb7, 0x3b, 0x77, 0x72, 0x21, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0x9f, 0x84, 0xaa, 0xeb, 0x47, + 0x62, 0x0c, 0x06, 0x8d, 0xe7, 0xf9, 0xaa, 0xb5, 0x1b, 0x0d, 0xf5, 0xfd, 0xc9, 0x1f, 0x9c, 0x54, + 0x40, 0xef, 0xc2, 0xa8, 0x1e, 0x84, 0x25, 0x9e, 0x85, 0x7c, 0xb9, 0x90, 0x16, 0x6f, 0x44, 0x2e, + 0x71, 0x3b, 0xa2, 0x72, 0xae, 0x35, 0x82, 0x9a, 0x8c, 0x26, 0xd0, 0x4f, 0x03, 0x8a, 0x48, 0xb8, + 0xed, 0x35, 0xc9, 0x42, 0x93, 0xe5, 0x18, 0x66, 0x96, 0xa6, 0x61, 0x23, 0xca, 0x04, 0x35, 0xba, + 0x30, 0x70, 0x46, 0x2d, 0x74, 0x85, 0xf2, 0x3f, 0xbd, 0x54, 0xf8, 0x42, 0x4b, 0xf1, 0x74, 0xa6, + 0x46, 0xda, 0x21, 0x69, 0x3a, 0x31, 0x71, 0x4d, 0x8a, 0x38, 0x55, 0x8f, 0x9e, 0x8e, 0xea, 0x19, + 0x09, 0x30, 0x3d, 0x78, 0xbb, 0x9f, 0x92, 0xa0, 0xda, 0xde, 0x66, 0x10, 0xc5, 0x37, 0x48, 0x7c, + 0x3f, 0x08, 0xef, 0x89, 0xbc, 0x6c, 0x49, 0xc2, 0xc6, 0x04, 0x84, 0x75, 0x3c, 0x2a, 0xc9, 0xb1, + 0x0b, 0xc8, 0xd5, 0x1a, 0xbb, 0xdd, 0x19, 0x4e, 0xf6, 0xce, 0x15, 0x5e, 0x8c, 0x25, 0x5c, 0xa2, + 0xae, 0xd6, 0x97, 0xd8, 0x4d, 0x4d, 0x0a, 0x75, 0xb5, 0xbe, 0x84, 0x25, 0x1c, 0x05, 0xdd, 0x8f, + 0x20, 0x8e, 0x17, 0xb9, 0x35, 0xeb, 0x3e, 0x4f, 0x0a, 0xbe, 0x83, 0xf8, 0x00, 0x26, 0xd5, 0x43, + 0x8c, 0x3c, 0x75, 0x5d, 0x34, 0x33, 0xc1, 0x16, 0xce, 0x61, 0x32, 0xe0, 0x29, 0xeb, 0xe2, 0x6a, + 0x8a, 0x26, 0xee, 0x6a, 0xc5, 0x48, 0x11, 0x32, 0x99, 0xfb, 0x34, 0xc8, 0x79, 0xa8, 0x46, 0x9d, + 0xbb, 0x6e, 0xb0, 0xe5, 0x78, 0x3e, 0xbb, 0x4e, 0xd1, 0x44, 0xa9, 0x86, 0x04, 0xe0, 0x04, 0x07, + 0xd5, 0x61, 0xd8, 0x11, 0x8a, 0xa4, 0xb8, 0xf6, 0xc8, 0xc9, 0x05, 0x20, 0xd5, 0x4e, 0x6e, 0xe3, + 0x95, 0xff, 0xb0, 0xa2, 0x82, 0x5e, 0x81, 0x31, 0x11, 0xca, 0x26, 0x5c, 0x4e, 0x4f, 0x98, 0x61, + 0x0f, 0x0d, 0x1d, 0x88, 0x4d, 0x5c, 0xb4, 0x01, 0xe3, 0x94, 0x4a, 0xc2, 0x00, 0x67, 0xa6, 0xfb, + 0xe3, 0xa1, 0x5a, 0x12, 0x76, 0x9d, 0x0c, 0x4e, 0x91, 0x45, 0x2e, 0x3c, 0xe6, 0x74, 0xe2, 0x60, + 0x8b, 0xee, 0x04, 0x73, 0x9f, 0xac, 0x05, 0xf7, 0x88, 0xcf, 0xee, 0x3a, 0x86, 0x17, 0xcf, 0xec, + 0xed, 0xce, 0x3d, 0xb6, 0x70, 0x00, 0x1e, 0x3e, 0x90, 0x0a, 0x7a, 0x0b, 0x46, 0xe2, 0xa0, 0x25, + 0x3c, 0xc9, 0xa3, 0x99, 0x53, 0x45, 0x52, 0x21, 0xad, 0xa9, 0x0a, 0xba, 0x31, 0x45, 0x11, 0xc1, + 0x3a, 0x45, 0xf4, 0x36, 0x8c, 0xd2, 0xb9, 0xbf, 0xee, 0xb4, 0xdb, 0x9e, 0xbf, 0x11, 0xcd, 0x3c, + 0x52, 0x64, 0xb4, 0x54, 0xa2, 0x4f, 0x73, 0xff, 0xb2, 0x22, 0x12, 0x61, 0x83, 0xe2, 0xec, 0x4f, + 0xc1, 0x54, 0x17, 0xd3, 0xeb, 0xcb, 0xc9, 0xf6, 0x3f, 0x0e, 0x40, 0x55, 0x59, 0x2e, 0xd1, 0x79, + 0xd3, 0x48, 0xfd, 0x68, 0xda, 0x48, 0x3d, 0x4c, 0x05, 0x45, 0xdd, 0x2e, 0xfd, 0xd9, 0x8c, 0xc7, + 0xfd, 0x9f, 0xcd, 0xdd, 0xe5, 0xc5, 0x23, 0xec, 0x34, 0x55, 0xb3, 0x5c, 0xd8, 0xee, 0x5d, 0x39, + 0x50, 0x7b, 0x2d, 0xf8, 0x60, 0x25, 0xd5, 0x53, 0xdb, 0x81, 0xbb, 0x5a, 0x4f, 0xbf, 0xc7, 0x56, + 0xa7, 0x85, 0x98, 0xc3, 0x98, 0x7e, 0x41, 0x4f, 0x6d, 0xa6, 0x5f, 0x0c, 0x1d, 0x52, 0xbf, 0x90, + 0x04, 0x70, 0x42, 0x0b, 0x6d, 0xc3, 0x54, 0xd3, 0x7c, 0x5e, 0x4f, 0xc5, 0xcd, 0x3d, 0xdf, 0xc7, + 0xf3, 0x76, 0x1d, 0xed, 0x65, 0x9c, 0xa5, 0x34, 0x3d, 0xdc, 0xdd, 0x04, 0x7a, 0x05, 0x86, 0xdf, + 0x0d, 0x22, 0x76, 0x7d, 0x22, 0x8e, 0x2e, 0x19, 
0x9f, 0x34, 0xfc, 0xda, 0xcd, 0x06, 0x2b, 0xdf, + 0xdf, 0x9d, 0x1b, 0xa9, 0x07, 0xae, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0x39, 0x0b, 0x4e, 0x1a, 0x3b, + 0x59, 0xf5, 0x1c, 0x0e, 0xd3, 0xf3, 0xc7, 0x45, 0xcb, 0x27, 0x57, 0xb3, 0x68, 0xe2, 0xec, 0xa6, + 0xec, 0xdf, 0xe1, 0xa6, 0x5a, 0x61, 0xbc, 0x21, 0x51, 0xa7, 0x75, 0x1c, 0xaf, 0x54, 0xdc, 0x34, + 0xec, 0x4a, 0x0f, 0xe1, 0xb2, 0xe0, 0xdf, 0x5b, 0xec, 0xb2, 0x60, 0x8d, 0x6c, 0xb5, 0x5b, 0x4e, + 0x7c, 0x1c, 0x3e, 0xd8, 0x9f, 0x81, 0xe1, 0x58, 0xb4, 0x56, 0xec, 0x89, 0x0d, 0xad, 0x7b, 0xec, + 0x12, 0x45, 0x1d, 0x7d, 0xb2, 0x14, 0x2b, 0x82, 0xf6, 0xbf, 0xe2, 0xb3, 0x22, 0x21, 0xc7, 0x60, + 0x11, 0xb9, 0x61, 0x5a, 0x44, 0x9e, 0x29, 0xfc, 0x2d, 0x3d, 0x2c, 0x23, 0xdf, 0x31, 0xbf, 0x80, + 0x69, 0x28, 0x3f, 0x3a, 0xb7, 0x59, 0xf6, 0xaf, 0x58, 0x30, 0x9d, 0xe5, 0x14, 0x41, 0x45, 0x18, + 0xae, 0x1f, 0xa9, 0x7b, 0x3e, 0x35, 0xaa, 0xb7, 0x45, 0x39, 0x56, 0x18, 0x85, 0x73, 0xde, 0xf7, + 0x97, 0xca, 0xeb, 0x26, 0x98, 0x0f, 0x35, 0xa2, 0x57, 0x79, 0xc8, 0x85, 0xa5, 0x5e, 0x52, 0xec, + 0x2f, 0xdc, 0xc2, 0xfe, 0x46, 0x09, 0xa6, 0xb9, 0xb1, 0x7d, 0x61, 0x3b, 0xf0, 0xdc, 0x7a, 0xe0, + 0x8a, 0x00, 0x14, 0x17, 0x46, 0xdb, 0x9a, 0x7a, 0x5b, 0x2c, 0x35, 0x90, 0xae, 0x10, 0x27, 0x2a, + 0x85, 0x5e, 0x8a, 0x0d, 0xaa, 0xb4, 0x15, 0xb2, 0xed, 0x35, 0x95, 0xed, 0xb6, 0xd4, 0xf7, 0xc9, + 0xa0, 0x5a, 0x59, 0xd6, 0xe8, 0x60, 0x83, 0xea, 0x11, 0x3c, 0x55, 0x63, 0xff, 0x7d, 0x0b, 0x1e, + 0xe9, 0x91, 0x3e, 0x88, 0x36, 0x77, 0x9f, 0x5d, 0x70, 0x88, 0x97, 0x40, 0x55, 0x73, 0xfc, 0xda, + 0x03, 0x0b, 0x28, 0xba, 0x0b, 0xc0, 0xaf, 0x2d, 0xa8, 0x34, 0x9d, 0xbe, 0x53, 0x2f, 0x98, 0xa4, + 0x43, 0xcb, 0xdf, 0x20, 0x29, 0x61, 0x8d, 0xaa, 0xfd, 0xb5, 0x32, 0x0c, 0xf0, 0x07, 0xe7, 0xeb, + 0x30, 0xb4, 0xc9, 0xf3, 0x2a, 0xf7, 0x97, 0xd6, 0x39, 0x51, 0x5f, 0x78, 0x01, 0x96, 0x64, 0xd0, + 0x75, 0x38, 0x21, 0x42, 0xa0, 0x6a, 0xa4, 0xe5, 0xec, 0x48, 0x7d, 0x98, 0xbf, 0x5f, 0x22, 0x13, + 0xed, 0x9f, 0x58, 0xed, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0xab, 0x5d, 0x69, 0x10, 0x79, 0xbe, 0x6a, + 0x25, 0x0b, 0xe7, 0xa4, 0x42, 0x7c, 0x05, 0xc6, 0xda, 0x5d, 0x9a, 0xbf, 0xf6, 0xae, 0xb7, 0xa9, + 0xed, 0x9b, 0xb8, 0xcc, 0x87, 0xa2, 0xc3, 0x7c, 0x47, 0xd6, 0x36, 0x43, 0x12, 0x6d, 0x06, 0x2d, + 0x57, 0x3c, 0x49, 0x9b, 0xf8, 0x50, 0xa4, 0xe0, 0xb8, 0xab, 0x06, 0xa5, 0xb2, 0xee, 0x78, 0xad, + 0x4e, 0x48, 0x12, 0x2a, 0x83, 0x26, 0x95, 0x95, 0x14, 0x1c, 0x77, 0xd5, 0xa0, 0x6b, 0xeb, 0xa4, + 0x78, 0xc5, 0x54, 0x06, 0xcb, 0x0b, 0x16, 0xf4, 0x69, 0x18, 0x92, 0x81, 0x0c, 0x85, 0x72, 0xba, + 0x08, 0x07, 0x09, 0xf5, 0x22, 0xaa, 0xf6, 0x9e, 0x9d, 0x08, 0x61, 0x90, 0xf4, 0x0e, 0xf3, 0x5a, + 0xe6, 0x9f, 0x5b, 0x70, 0x22, 0xc3, 0x21, 0x8f, 0xb3, 0xb4, 0x0d, 0x2f, 0x8a, 0xd5, 0x6b, 0x1a, + 0x1a, 0x4b, 0xe3, 0xe5, 0x58, 0x61, 0xd0, 0xdd, 0xc2, 0x99, 0x66, 0x9a, 0x51, 0x0a, 0x57, 0x17, + 0x01, 0xed, 0x8f, 0x51, 0xa2, 0x33, 0x50, 0xe9, 0x44, 0x24, 0x94, 0x0f, 0x4b, 0x4a, 0x3e, 0x7f, + 0x2b, 0x22, 0x21, 0x66, 0x10, 0x2a, 0xb6, 0x6e, 0x28, 0x8b, 0xa0, 0x26, 0xb6, 0x32, 0xeb, 0x1e, + 0xe6, 0x30, 0xfb, 0xcb, 0x65, 0x98, 0x48, 0x39, 0xe6, 0xd2, 0x8e, 0x6c, 0x05, 0xbe, 0x17, 0x07, + 0x2a, 0xcf, 0x1e, 0x7f, 0xcb, 0x8e, 0xb4, 0x37, 0xaf, 0x8b, 0x72, 0xac, 0x30, 0xd0, 0xd3, 0xf2, + 0xb5, 0xe2, 0xf4, 0x2b, 0x21, 0x8b, 0x35, 0xe3, 0xc1, 0xe2, 0xa2, 0x2f, 0xfc, 0x3c, 0x09, 0x95, + 0x76, 0xa0, 0x1e, 0x9f, 0x57, 0xf3, 0x89, 0x17, 0x6b, 0xf5, 0x20, 0x68, 0x61, 0x06, 0x44, 0x4f, + 0x89, 0xaf, 0x4f, 0xdd, 0xd0, 0x60, 0xc7, 0x0d, 0x22, 0x6d, 0x08, 0x9e, 0x81, 0xa1, 0x7b, 0x64, + 0x27, 0xf4, 0xfc, 0x8d, 0xf4, 0xfd, 0xd4, 0x55, 0x5e, 0x8c, 0x25, 0xdc, 
0x4c, 0x9a, 0x3f, 0x74, + 0xc4, 0xaf, 0xf8, 0x0c, 0xe7, 0x9e, 0x83, 0xdf, 0xb4, 0x60, 0x82, 0x65, 0xc1, 0x15, 0xa9, 0x1a, + 0xbc, 0xc0, 0x3f, 0x06, 0x19, 0xe3, 0x49, 0x18, 0x08, 0x69, 0xa3, 0xe9, 0x67, 0x38, 0x58, 0x4f, + 0x30, 0x87, 0xa1, 0xc7, 0xa0, 0xc2, 0xba, 0x40, 0xa7, 0x71, 0x94, 0x27, 0xdb, 0xaf, 0x39, 0xb1, + 0x83, 0x59, 0x29, 0x8b, 0x85, 0xc3, 0xa4, 0xdd, 0xf2, 0x78, 0xa7, 0x13, 0x83, 0xee, 0x07, 0x2d, + 0x16, 0x2e, 0xb3, 0x93, 0x0f, 0x2b, 0x16, 0x2e, 0x9b, 0xf8, 0xc1, 0x72, 0xfe, 0x7f, 0x2b, 0xc1, + 0xe9, 0xcc, 0x7a, 0xc9, 0x4d, 0xf7, 0x8a, 0x71, 0xd3, 0x7d, 0x31, 0x75, 0xd3, 0x6d, 0x1f, 0x5c, + 0xfb, 0xe1, 0xdc, 0x7d, 0x67, 0x5f, 0x49, 0x97, 0x8f, 0xf1, 0x4a, 0xba, 0x52, 0x54, 0xc4, 0x19, + 0xc8, 0x11, 0x71, 0xfe, 0xd0, 0x82, 0x47, 0x33, 0x87, 0xec, 0x03, 0x17, 0x7c, 0x98, 0xd9, 0xcb, + 0x1e, 0xda, 0xc9, 0x2f, 0x97, 0x7b, 0x7c, 0x15, 0xd3, 0x53, 0xce, 0x52, 0x2e, 0xc4, 0x80, 0x91, + 0x10, 0xde, 0x46, 0x39, 0x07, 0xe2, 0x65, 0x58, 0x41, 0x51, 0xa4, 0x05, 0xef, 0xf1, 0x4e, 0x2e, + 0x1f, 0x72, 0x43, 0xcd, 0x9b, 0x96, 0x78, 0x3d, 0xff, 0x44, 0x3a, 0xa4, 0xef, 0x8e, 0xa6, 0x79, + 0x96, 0x0f, 0xa3, 0x79, 0x8e, 0x66, 0x6b, 0x9d, 0x68, 0x01, 0x26, 0xb6, 0x3c, 0x9f, 0x3d, 0xfe, + 0x6b, 0x4a, 0x4f, 0x2a, 0x82, 0xfa, 0xba, 0x09, 0xc6, 0x69, 0xfc, 0xd9, 0x57, 0x60, 0xec, 0xf0, + 0xd6, 0xb5, 0x1f, 0x94, 0xe1, 0xc3, 0x07, 0x30, 0x05, 0x7e, 0x3a, 0x18, 0xf3, 0xa2, 0x9d, 0x0e, + 0x5d, 0x73, 0x53, 0x87, 0xe9, 0xf5, 0x4e, 0xab, 0xb5, 0xc3, 0xfc, 0xc4, 0x88, 0x2b, 0x31, 0x84, + 0x50, 0xa3, 0x92, 0x62, 0xaf, 0x64, 0xe0, 0xe0, 0xcc, 0x9a, 0xe8, 0xa7, 0x01, 0x05, 0x77, 0x59, + 0x7a, 0x66, 0x37, 0xc9, 0xaf, 0xc1, 0xa6, 0xa0, 0x9c, 0x6c, 0xd5, 0x9b, 0x5d, 0x18, 0x38, 0xa3, + 0x16, 0x95, 0x53, 0xe9, 0x39, 0xb6, 0xa3, 0xba, 0x95, 0x92, 0x53, 0xb1, 0x0e, 0xc4, 0x26, 0x2e, + 0xba, 0x0c, 0x53, 0xce, 0xb6, 0xe3, 0xf1, 0x74, 0x6b, 0x92, 0x00, 0x17, 0x54, 0x95, 0xfd, 0x6a, + 0x21, 0x8d, 0x80, 0xbb, 0xeb, 0xa0, 0xb6, 0x61, 0x90, 0xe4, 0x2f, 0x44, 0x7c, 0xf2, 0x10, 0x2b, + 0xb8, 0xb0, 0x89, 0xd2, 0xfe, 0x53, 0x8b, 0x1e, 0x7d, 0x19, 0xef, 0xc4, 0xd2, 0x11, 0x51, 0x06, + 0x36, 0x2d, 0x18, 0x51, 0x8d, 0xc8, 0x92, 0x0e, 0xc4, 0x26, 0x2e, 0x5f, 0x1a, 0x51, 0xe2, 0xb6, + 0x6e, 0x48, 0x9b, 0x22, 0x8e, 0x57, 0x61, 0x50, 0x09, 0xda, 0xf5, 0xb6, 0xbd, 0x28, 0x08, 0xc5, + 0x06, 0xea, 0xf7, 0x35, 0x76, 0xc5, 0x2f, 0x6b, 0x9c, 0x0c, 0x96, 0xf4, 0xec, 0xaf, 0x94, 0x60, + 0x4c, 0xb6, 0xf8, 0x5a, 0x27, 0x88, 0x9d, 0x63, 0x38, 0xd2, 0x5f, 0x33, 0x8e, 0xf4, 0xf3, 0xc5, + 0xc2, 0x9a, 0x59, 0xe7, 0x7a, 0x1e, 0xe5, 0x9f, 0x4e, 0x1d, 0xe5, 0x17, 0xfa, 0x21, 0x7a, 0xf0, + 0x11, 0xfe, 0x6f, 0x2c, 0x98, 0x32, 0xf0, 0x8f, 0xe1, 0x24, 0xa9, 0x9b, 0x27, 0xc9, 0x73, 0x7d, + 0x7c, 0x4d, 0x8f, 0x13, 0xe4, 0xeb, 0xa5, 0xd4, 0x57, 0xb0, 0x93, 0xe3, 0x67, 0xa0, 0xb2, 0xe9, + 0x84, 0x6e, 0xb1, 0xdc, 0xa3, 0x5d, 0xd5, 0xe7, 0xaf, 0x38, 0xa1, 0xcb, 0xf9, 0xff, 0x39, 0xf5, + 0x8a, 0x9d, 0x13, 0xba, 0xb9, 0xd1, 0x1c, 0xac, 0x51, 0x74, 0x09, 0x06, 0xa3, 0x66, 0xd0, 0x56, + 0xfe, 0xae, 0x67, 0xf8, 0x0b, 0x77, 0xb4, 0x64, 0x7f, 0x77, 0x0e, 0x99, 0xcd, 0xd1, 0x62, 0x2c, + 0xf0, 0x67, 0x37, 0xa0, 0xaa, 0x9a, 0x3e, 0x52, 0x8f, 0xff, 0xff, 0x52, 0x86, 0x13, 0x19, 0x6b, + 0x05, 0xfd, 0xac, 0x31, 0x6e, 0xaf, 0xf4, 0xbd, 0xd8, 0xde, 0xe7, 0xc8, 0xfd, 0x2c, 0xd3, 0x94, + 0x5c, 0xb1, 0x3a, 0x0e, 0xd1, 0xfc, 0xad, 0x88, 0xa4, 0x9b, 0xa7, 0x45, 0xf9, 0xcd, 0xd3, 0x66, + 0x8f, 0x6d, 0xf8, 0x69, 0x43, 0xaa, 0xa7, 0x47, 0x3a, 0xcf, 0x5f, 0xa8, 0xc0, 0x74, 0x56, 0xfe, + 0x04, 0xf4, 0x0b, 0x56, 0xea, 0xa5, 0x93, 0x57, 0xfb, 0x4f, 0xc2, 0xc0, 0x9f, 0x3f, 0x11, 0xd9, + 
0x8d, 0xe6, 0xcd, 0xb7, 0x4f, 0x72, 0x47, 0x5c, 0xb4, 0xce, 0xe2, 0xb0, 0x42, 0xfe, 0x6a, 0x8d, + 0xe4, 0x0a, 0x9f, 0x3a, 0x44, 0x57, 0xc4, 0xc3, 0x37, 0x51, 0x2a, 0x0e, 0x4b, 0x16, 0xe7, 0xc7, + 0x61, 0xc9, 0x3e, 0xcc, 0x7a, 0x30, 0xa2, 0x7d, 0xd7, 0x91, 0x2e, 0x83, 0x7b, 0xf4, 0x88, 0xd2, + 0xfa, 0x7d, 0xa4, 0x4b, 0xe1, 0xef, 0x58, 0x90, 0x72, 0x4e, 0x53, 0x66, 0x19, 0xab, 0xa7, 0x59, + 0xe6, 0x0c, 0x54, 0xc2, 0xa0, 0x45, 0xd2, 0x8f, 0x5f, 0xe0, 0xa0, 0x45, 0x30, 0x83, 0xa8, 0x87, + 0xad, 0xcb, 0xbd, 0x1e, 0xb6, 0xa6, 0x7a, 0x7a, 0x8b, 0x6c, 0x13, 0x69, 0x24, 0x51, 0x6c, 0xfc, + 0x1a, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x66, 0x05, 0x4e, 0x64, 0xc4, 0x24, 0x52, 0x0d, 0x69, 0xc3, + 0x89, 0xc9, 0x7d, 0x67, 0x27, 0x9d, 0x84, 0xf7, 0x32, 0x2f, 0xc6, 0x12, 0xce, 0x9c, 0x6a, 0x79, + 0x22, 0xbf, 0x94, 0xe9, 0x4a, 0xe4, 0xef, 0x13, 0xd0, 0xa3, 0x7f, 0x02, 0xf9, 0x22, 0x40, 0x14, + 0xb5, 0x96, 0x7d, 0x2a, 0xe1, 0xb9, 0xc2, 0x79, 0x37, 0xc9, 0xff, 0xd8, 0xb8, 0x26, 0x20, 0x58, + 0xc3, 0x42, 0x35, 0x98, 0x6c, 0x87, 0x41, 0xcc, 0x0d, 0x83, 0x35, 0xee, 0x6a, 0x31, 0x60, 0x46, + 0x8d, 0xd5, 0x53, 0x70, 0xdc, 0x55, 0x03, 0xbd, 0x04, 0x23, 0x22, 0x92, 0xac, 0x1e, 0x04, 0x2d, + 0x61, 0x46, 0x52, 0xf7, 0xf1, 0x8d, 0x04, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0xb3, 0x36, 0x0e, 0x65, + 0x56, 0xe3, 0x16, 0x47, 0x0d, 0x2f, 0x95, 0x65, 0x65, 0xb8, 0x50, 0x96, 0x95, 0xc4, 0xb0, 0x56, + 0x2d, 0x7c, 0x11, 0x03, 0xb9, 0x06, 0xa8, 0xdf, 0x2f, 0xc3, 0x20, 0x9f, 0x8a, 0x63, 0x90, 0xf2, + 0xea, 0xc2, 0xa4, 0x54, 0x28, 0xa3, 0x05, 0xef, 0xd5, 0x7c, 0xcd, 0x89, 0x1d, 0xce, 0x9a, 0xd4, + 0x0e, 0x49, 0xcc, 0x50, 0x68, 0xde, 0xd8, 0x43, 0xb3, 0x29, 0x4b, 0x09, 0x70, 0x1a, 0xda, 0x8e, + 0xda, 0x04, 0x88, 0xd8, 0x33, 0xbc, 0x94, 0x86, 0xc8, 0x10, 0xfc, 0x62, 0xa1, 0x7e, 0x34, 0x54, + 0x35, 0xde, 0x9b, 0x64, 0x59, 0x2a, 0x00, 0xd6, 0x68, 0xcf, 0xbe, 0x0c, 0x55, 0x85, 0x9c, 0xa7, + 0x42, 0x8e, 0xea, 0xac, 0xed, 0x27, 0x61, 0x22, 0xd5, 0x56, 0x5f, 0x1a, 0xe8, 0x6f, 0x5b, 0x30, + 0xc1, 0xbb, 0xbc, 0xec, 0x6f, 0x0b, 0x56, 0xf0, 0x79, 0x0b, 0xa6, 0x5b, 0x19, 0x3b, 0x51, 0x4c, + 0xf3, 0x61, 0xf6, 0xb0, 0x52, 0x3e, 0xb3, 0xa0, 0x38, 0xb3, 0x35, 0x74, 0x16, 0x86, 0xf9, 0xab, + 0xe2, 0x4e, 0x4b, 0x78, 0x8a, 0x8f, 0xf2, 0xdc, 0xe8, 0xbc, 0x0c, 0x2b, 0xa8, 0xfd, 0x43, 0x0b, + 0xa6, 0xf8, 0x47, 0x5c, 0x25, 0x3b, 0x4a, 0xbd, 0xfa, 0x80, 0x7c, 0x86, 0xc8, 0x02, 0x5f, 0xea, + 0x91, 0x05, 0x5e, 0xff, 0xca, 0xf2, 0x81, 0x5f, 0xf9, 0x0d, 0x0b, 0xc4, 0x0a, 0x3d, 0x06, 0xfd, + 0x61, 0xd5, 0xd4, 0x1f, 0x3e, 0x52, 0x64, 0xd1, 0xf7, 0x50, 0x1c, 0x7e, 0xa9, 0x04, 0x93, 0x1c, + 0x21, 0xb9, 0x91, 0xf9, 0xa0, 0x4c, 0x4e, 0x7f, 0xaf, 0x13, 0xa9, 0xb7, 0x69, 0xb3, 0xbf, 0xd4, + 0x98, 0xcb, 0xca, 0x81, 0x73, 0xf9, 0x3f, 0x2d, 0x40, 0x7c, 0x4c, 0xd2, 0x4f, 0xb2, 0xf3, 0xd3, + 0x4d, 0x33, 0x07, 0x24, 0x9c, 0x43, 0x41, 0xb0, 0x86, 0xf5, 0x90, 0x3f, 0x21, 0x75, 0x1f, 0x56, + 0xce, 0xbf, 0x0f, 0xeb, 0xe3, 0xab, 0xff, 0x7b, 0x19, 0xd2, 0xce, 0x9a, 0xe8, 0x6d, 0x18, 0x6d, + 0x3a, 0x6d, 0xe7, 0xae, 0xd7, 0xf2, 0x62, 0x8f, 0x44, 0xc5, 0x2e, 0xdc, 0x97, 0xb4, 0x1a, 0xe2, + 0x1a, 0x4a, 0x2b, 0xc1, 0x06, 0x45, 0x34, 0x0f, 0xd0, 0x0e, 0xbd, 0x6d, 0xaf, 0x45, 0x36, 0x98, + 0xc6, 0xc3, 0x62, 0x4e, 0xf8, 0xdd, 0xb1, 0x2c, 0xc5, 0x1a, 0x46, 0x46, 0x8c, 0x42, 0xf9, 0x38, + 0x62, 0x14, 0x2a, 0x47, 0x18, 0xa3, 0x30, 0x50, 0x28, 0x46, 0x01, 0xc3, 0x29, 0x79, 0xd0, 0xd3, + 0xff, 0x2b, 0x5e, 0x8b, 0x08, 0x39, 0x8f, 0xc7, 0xaf, 0xcc, 0xee, 0xed, 0xce, 0x9d, 0xc2, 0x99, + 0x18, 0xb8, 0x47, 0x4d, 0xbb, 0x03, 0x27, 0x1a, 0x24, 0x94, 0x4f, 0xef, 0xa9, 0x7d, 0xf7, 0x59, + 0xa8, 0x86, 0xa9, 0x2d, 
0xdf, 0x67, 0x92, 0x02, 0x2d, 0x2f, 0x9d, 0xdc, 0xe2, 0x09, 0x49, 0xfb, + 0xaf, 0x97, 0x60, 0x48, 0xb8, 0x74, 0x1e, 0x83, 0xa0, 0x72, 0xd5, 0x30, 0x47, 0x3d, 0x93, 0xc7, + 0x2b, 0x59, 0xb7, 0x7a, 0x1a, 0xa2, 0x1a, 0x29, 0x43, 0xd4, 0x73, 0xc5, 0xc8, 0x1d, 0x6c, 0x82, + 0xfa, 0x27, 0x65, 0x18, 0x37, 0x5d, 0x5c, 0x8f, 0x61, 0x58, 0x5e, 0x87, 0xa1, 0x48, 0x78, 0x5b, + 0x97, 0x8a, 0xf8, 0xf7, 0xa5, 0xa7, 0x38, 0xb9, 0xb5, 0x17, 0xfe, 0xd5, 0x92, 0x5c, 0xa6, 0x43, + 0x77, 0xf9, 0x58, 0x1c, 0xba, 0xf3, 0x3c, 0x8f, 0x2b, 0x0f, 0xc3, 0xf3, 0xd8, 0xfe, 0x2e, 0x3b, + 0x1e, 0xf4, 0xf2, 0x63, 0x38, 0xf2, 0x5f, 0x33, 0x0f, 0x92, 0x73, 0x85, 0xd6, 0x9d, 0xe8, 0x5e, + 0x8f, 0xa3, 0xff, 0x5b, 0x16, 0x8c, 0x08, 0xc4, 0x63, 0xf8, 0x80, 0x9f, 0x36, 0x3f, 0xe0, 0xa9, + 0x42, 0x1f, 0xd0, 0xa3, 0xe7, 0x5f, 0x29, 0xa9, 0x9e, 0xd7, 0x83, 0x30, 0x2e, 0x94, 0xbd, 0x7d, + 0x98, 0xaa, 0x89, 0x41, 0x33, 0x68, 0x09, 0x61, 0xef, 0xb1, 0x24, 0x5c, 0x91, 0x97, 0xef, 0x6b, + 0xbf, 0xb1, 0xc2, 0x66, 0xd1, 0x74, 0x41, 0x18, 0x8b, 0xc3, 0x36, 0x89, 0xa6, 0x0b, 0xc2, 0x18, + 0x33, 0x08, 0x72, 0x01, 0x62, 0x27, 0xdc, 0x20, 0x31, 0x2d, 0x13, 0x91, 0xbe, 0xbd, 0x77, 0x6b, + 0x27, 0xf6, 0x5a, 0xf3, 0x9e, 0x1f, 0x47, 0x71, 0x38, 0xbf, 0xea, 0xc7, 0x37, 0x43, 0xae, 0x20, + 0x68, 0xf1, 0x87, 0x8a, 0x16, 0xd6, 0xe8, 0xca, 0x90, 0x12, 0xd6, 0xc6, 0x80, 0x79, 0xdb, 0x74, + 0x43, 0x94, 0x63, 0x85, 0x61, 0xbf, 0xcc, 0x38, 0x3b, 0x1b, 0xa0, 0xfe, 0x42, 0x03, 0xbf, 0x30, + 0xa4, 0x86, 0x96, 0x99, 0x90, 0x6f, 0xe8, 0x01, 0x88, 0x45, 0xd9, 0x27, 0xed, 0x82, 0xee, 0x73, + 0x9d, 0xc4, 0x2b, 0x22, 0xd2, 0x75, 0x45, 0xf9, 0x72, 0x61, 0x8e, 0xdc, 0xc7, 0xa5, 0x24, 0x4b, + 0x23, 0xc9, 0x72, 0xe7, 0xad, 0xd6, 0xd3, 0x39, 0xf7, 0x97, 0x24, 0x00, 0x27, 0x38, 0xe8, 0xbc, + 0x50, 0x3e, 0xb9, 0x75, 0xe6, 0xc3, 0x29, 0xe5, 0x53, 0x0e, 0x89, 0xa6, 0x7d, 0x5e, 0x80, 0x11, + 0xf5, 0x8c, 0x51, 0x9d, 0x3f, 0x20, 0x53, 0xe5, 0xb2, 0xd8, 0x72, 0x52, 0x8c, 0x75, 0x1c, 0xb4, + 0x06, 0x13, 0x11, 0x7f, 0x63, 0x49, 0xc6, 0x76, 0x08, 0x23, 0xc3, 0xb3, 0xf2, 0x42, 0xb3, 0x61, + 0x82, 0xf7, 0x59, 0x11, 0xdf, 0xca, 0x32, 0x1a, 0x24, 0x4d, 0x02, 0xbd, 0x0a, 0xe3, 0x2d, 0xfd, + 0xdd, 0xd9, 0xba, 0xb0, 0x41, 0x28, 0x17, 0x35, 0xe3, 0x55, 0xda, 0x3a, 0x4e, 0x61, 0xa3, 0xd7, + 0x61, 0x46, 0x2f, 0x11, 0x09, 0x91, 0x1c, 0x7f, 0x83, 0x44, 0xe2, 0x3d, 0x96, 0xc7, 0xf6, 0x76, + 0xe7, 0x66, 0xae, 0xf5, 0xc0, 0xc1, 0x3d, 0x6b, 0xa3, 0x4b, 0x30, 0x2a, 0x3f, 0x5f, 0x8b, 0x84, + 0x4a, 0x9c, 0x23, 0x35, 0x18, 0x36, 0x30, 0xd1, 0x7d, 0x38, 0x29, 0xff, 0xaf, 0x85, 0xce, 0xfa, + 0xba, 0xd7, 0x14, 0x21, 0x69, 0x23, 0x8c, 0xc4, 0x82, 0xf4, 0x2d, 0x5f, 0xce, 0x42, 0xda, 0xdf, + 0x9d, 0x3b, 0x23, 0x46, 0x2d, 0x13, 0xce, 0x26, 0x31, 0x9b, 0x3e, 0xba, 0x0e, 0x27, 0x36, 0x89, + 0xd3, 0x8a, 0x37, 0x97, 0x36, 0x49, 0xf3, 0x9e, 0xdc, 0x58, 0x2c, 0xbe, 0x4a, 0x73, 0x1f, 0xbc, + 0xd2, 0x8d, 0x82, 0xb3, 0xea, 0xbd, 0xbf, 0xfb, 0xe7, 0x9f, 0xa1, 0x95, 0x35, 0xf9, 0x01, 0xbd, + 0x03, 0xa3, 0xfa, 0x58, 0xa7, 0x05, 0x83, 0xfc, 0x37, 0x89, 0x85, 0x1c, 0xa2, 0x66, 0x40, 0x87, + 0x61, 0x83, 0xb6, 0xfd, 0xef, 0x4a, 0x30, 0x97, 0x93, 0x6f, 0x2c, 0x65, 0xf9, 0xb2, 0x0a, 0x59, + 0xbe, 0x16, 0xe4, 0x3b, 0x3d, 0x37, 0x52, 0x79, 0xde, 0x53, 0x2f, 0xef, 0x24, 0xd9, 0xde, 0xd3, + 0xf8, 0x85, 0xbd, 0xd2, 0x74, 0xe3, 0x59, 0x25, 0xd7, 0x39, 0xef, 0x0d, 0xdd, 0x1e, 0x3a, 0x70, + 0x18, 0xa1, 0xb7, 0xa7, 0x29, 0xd4, 0xfe, 0x6e, 0x09, 0x4e, 0xaa, 0xc1, 0xfc, 0xf1, 0x1d, 0xc2, + 0xb7, 0xba, 0x87, 0xf0, 0xa1, 0x9a, 0x94, 0xed, 0x9b, 0x30, 0xd8, 0xd8, 0x89, 0x9a, 0x71, 0xab, + 0xc0, 0x89, 0xff, 0xa4, 0xb1, 0xaf, 0x92, 0xd3, 
0x88, 0xbd, 0xbe, 0x27, 0xb6, 0x99, 0xfd, 0x8b, + 0x16, 0x4c, 0xac, 0x2d, 0xd5, 0x1b, 0x41, 0xf3, 0x1e, 0x89, 0x17, 0xb8, 0xf1, 0x03, 0x8b, 0x03, + 0xdf, 0x3a, 0xe4, 0x41, 0x9e, 0x25, 0x22, 0x9c, 0x81, 0xca, 0x66, 0x10, 0xc5, 0xe9, 0x0b, 0x84, + 0x2b, 0x41, 0x14, 0x63, 0x06, 0xb1, 0xff, 0xcc, 0x82, 0x01, 0xf6, 0xb8, 0x5c, 0xde, 0xc3, 0x84, + 0x45, 0xbe, 0x0b, 0xbd, 0x04, 0x83, 0x64, 0x7d, 0x9d, 0x34, 0x63, 0x31, 0xbf, 0x32, 0x18, 0x67, + 0x70, 0x99, 0x95, 0xd2, 0x13, 0x8d, 0x35, 0xc6, 0xff, 0x62, 0x81, 0x8c, 0x3e, 0x03, 0xd5, 0xd8, + 0xdb, 0x22, 0x0b, 0xae, 0x2b, 0x2c, 0xf6, 0xfd, 0xf9, 0x87, 0xa9, 0x13, 0x76, 0x4d, 0x12, 0xc1, + 0x09, 0x3d, 0xfb, 0x4b, 0x25, 0x80, 0x24, 0xd8, 0x2e, 0xef, 0x33, 0x17, 0xbb, 0xde, 0x5f, 0x7c, + 0x3a, 0xe3, 0xfd, 0x45, 0x94, 0x10, 0xcc, 0x78, 0x7d, 0x51, 0x0d, 0x55, 0xb9, 0xd0, 0x50, 0x55, + 0xfa, 0x19, 0xaa, 0x25, 0x98, 0x4a, 0x82, 0x05, 0xcd, 0xa8, 0x6b, 0x96, 0x23, 0x79, 0x2d, 0x0d, + 0xc4, 0xdd, 0xf8, 0xf6, 0x97, 0x2c, 0x10, 0x1e, 0xc5, 0x05, 0x16, 0xb4, 0x2b, 0xdf, 0x4a, 0x33, + 0xd2, 0x21, 0x3e, 0x5b, 0xc4, 0xd9, 0x5a, 0x24, 0x41, 0x54, 0x7c, 0xdf, 0x48, 0x7d, 0x68, 0x50, + 0xb5, 0x7f, 0xc3, 0x82, 0x11, 0x0e, 0xbe, 0xce, 0x14, 0xd1, 0xfc, 0x7e, 0xf5, 0x95, 0x80, 0x9b, + 0x3d, 0x23, 0x46, 0x09, 0xab, 0x44, 0xcc, 0xfa, 0x33, 0x62, 0x12, 0x80, 0x13, 0x1c, 0xf4, 0x0c, + 0x0c, 0x45, 0x9d, 0xbb, 0x0c, 0x3d, 0xe5, 0x5e, 0xdc, 0xe0, 0xc5, 0x58, 0xc2, 0xed, 0x7f, 0x56, + 0x82, 0xc9, 0xb4, 0x77, 0x39, 0xc2, 0x30, 0xc8, 0x19, 0x48, 0x5a, 0xa7, 0x39, 0xc8, 0x58, 0xaa, + 0x79, 0xa7, 0x03, 0x7f, 0x0c, 0x9f, 0xb1, 0x20, 0x41, 0x09, 0xad, 0xc3, 0x88, 0x1b, 0xdc, 0xf7, + 0xef, 0x3b, 0xa1, 0xbb, 0x50, 0x5f, 0x15, 0x33, 0x91, 0xe3, 0x0f, 0x58, 0x4b, 0x2a, 0xe8, 0xbe, + 0xef, 0xcc, 0x78, 0x97, 0x80, 0xb0, 0x4e, 0x18, 0x7d, 0x96, 0x65, 0x6f, 0x59, 0xf7, 0x36, 0xae, + 0x3b, 0xed, 0x62, 0x9e, 0x2f, 0x4b, 0x12, 0x5d, 0x6b, 0x63, 0x4c, 0x24, 0x7b, 0xe1, 0x00, 0x9c, + 0x90, 0xb4, 0x7f, 0xe9, 0x24, 0x18, 0x6b, 0xc1, 0xc8, 0x92, 0x6d, 0x3d, 0xf4, 0x2c, 0xd9, 0x6f, + 0xc2, 0x30, 0xd9, 0x6a, 0xc7, 0x3b, 0x35, 0x2f, 0x2c, 0xf6, 0xe6, 0xc1, 0xb2, 0xc0, 0xee, 0xa6, + 0x2e, 0x21, 0x58, 0x51, 0xec, 0x91, 0xf3, 0xbc, 0xfc, 0x81, 0xc8, 0x79, 0x5e, 0xf9, 0x4b, 0xc9, + 0x79, 0xfe, 0x3a, 0x0c, 0x6d, 0x78, 0x31, 0x26, 0xed, 0x40, 0x9c, 0xc6, 0x39, 0x8b, 0xe7, 0x32, + 0x47, 0xee, 0xce, 0x86, 0x2b, 0x00, 0x58, 0x92, 0x43, 0x6b, 0x6a, 0x53, 0x0d, 0x16, 0x91, 0x41, + 0xbb, 0x8d, 0xe9, 0x99, 0xdb, 0x4a, 0xe4, 0x38, 0x1f, 0x7a, 0xff, 0x39, 0xce, 0x55, 0x66, 0xf2, + 0xe1, 0x87, 0x95, 0x99, 0xdc, 0xc8, 0xf0, 0x5e, 0x3d, 0x8a, 0x0c, 0xef, 0x5f, 0xb2, 0xe0, 0x64, + 0x3b, 0xeb, 0x7d, 0x04, 0x91, 0x63, 0xfc, 0xa7, 0x0e, 0xf1, 0x62, 0x84, 0xd1, 0x34, 0xcb, 0x06, + 0x92, 0x89, 0x86, 0xb3, 0x1b, 0x96, 0xa9, 0xe2, 0x47, 0xde, 0x7f, 0xaa, 0xf8, 0xa3, 0x4e, 0x46, + 0x9e, 0x24, 0x8e, 0x1f, 0x3b, 0x92, 0xc4, 0xf1, 0xe3, 0x0f, 0x31, 0x71, 0xbc, 0x96, 0xf2, 0x7d, + 0xe2, 0xe1, 0xa6, 0x7c, 0xdf, 0x34, 0xcf, 0x25, 0x9e, 0x61, 0xfc, 0xa5, 0xc2, 0xe7, 0x92, 0xd1, + 0xc2, 0xc1, 0x27, 0x13, 0x4f, 0x7e, 0x3f, 0xf5, 0x3e, 0x93, 0xdf, 0x1b, 0x29, 0xe4, 0xd1, 0x51, + 0xa4, 0x90, 0x7f, 0x5b, 0x3f, 0x41, 0x4f, 0x14, 0x69, 0x41, 0x1d, 0x94, 0xdd, 0x2d, 0x64, 0x9d, + 0xa1, 0xdd, 0x49, 0xea, 0xa7, 0x8f, 0x3b, 0x49, 0xfd, 0xc9, 0x23, 0x4c, 0x52, 0x7f, 0xea, 0x58, + 0x93, 0xd4, 0x3f, 0xf2, 0x01, 0x49, 0x52, 0x3f, 0x73, 0x5c, 0x49, 0xea, 0x1f, 0x7d, 0xb8, 0x49, + 0xea, 0xdf, 0x86, 0x6a, 0x5b, 0xc6, 0x68, 0xce, 0xcc, 0x16, 0x99, 0xba, 0xcc, 0x90, 0x4e, 0x3e, + 0x75, 0x0a, 0x84, 0x13, 0xa2, 0xb4, 0x85, 0x24, 0x69, 0xfd, 0x87, 0x8b, 
0xb4, 0x90, 0x69, 0xf7, + 0x38, 0x20, 0x55, 0xfd, 0x17, 0x4a, 0x70, 0xfa, 0xe0, 0xdd, 0x91, 0x18, 0x4d, 0xea, 0x89, 0x2d, + 0x3b, 0x65, 0x34, 0x61, 0x92, 0xa7, 0x86, 0x55, 0x38, 0xf4, 0xfd, 0x32, 0x4c, 0x29, 0x9f, 0xb0, + 0x96, 0xd7, 0xdc, 0xd1, 0x9e, 0xce, 0x52, 0xb1, 0x0c, 0x8d, 0x34, 0x02, 0xee, 0xae, 0x83, 0x16, + 0x60, 0xc2, 0x28, 0x5c, 0xad, 0x09, 0xfd, 0x45, 0x59, 0x69, 0x1a, 0x26, 0x18, 0xa7, 0xf1, 0xed, + 0xaf, 0x5b, 0xf0, 0x48, 0x8f, 0xac, 0xb4, 0x85, 0xe3, 0xb9, 0xdb, 0x30, 0xd1, 0x36, 0xab, 0x16, + 0x4e, 0x0f, 0x61, 0x64, 0xc1, 0x55, 0xbd, 0x4e, 0x01, 0x70, 0x9a, 0xfc, 0xe2, 0xd9, 0xef, 0xfd, + 0xe0, 0xf4, 0x87, 0xfe, 0xe0, 0x07, 0xa7, 0x3f, 0xf4, 0xfd, 0x1f, 0x9c, 0xfe, 0xd0, 0xcf, 0xed, + 0x9d, 0xb6, 0xbe, 0xb7, 0x77, 0xda, 0xfa, 0x83, 0xbd, 0xd3, 0xd6, 0xf7, 0xf7, 0x4e, 0x5b, 0x7f, + 0xbe, 0x77, 0xda, 0xfa, 0xd2, 0x0f, 0x4f, 0x7f, 0xe8, 0x8d, 0xd2, 0xf6, 0x85, 0xff, 0x17, 0x00, + 0x00, 0xff, 0xff, 0x06, 0x5b, 0x53, 0xd1, 0x4d, 0xd1, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generated.proto b/vendor/k8s.io/client-go/pkg/api/v1/generated.proto new file mode 100644 index 000000000..2accf2a6c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/generated.proto @@ -0,0 +1,4117 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.client_go.pkg.api.v1; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +message AWSElasticBlockStoreVolumeSource { + // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + optional string volumeID = 1; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 2; + + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". 
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // +optional + optional int32 partition = 3; + + // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". + // If omitted, the default is "false". + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + optional bool readOnly = 4; +} + +// Affinity is a group of affinity scheduling rules. +message Affinity { + // Describes node affinity scheduling rules for the pod. + // +optional + optional NodeAffinity nodeAffinity = 1; + + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + optional PodAffinity podAffinity = 2; + + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + optional PodAntiAffinity podAntiAffinity = 3; +} + +// AttachedVolume describes a volume attached to a node +message AttachedVolume { + // Name of the attached volume + optional string name = 1; + + // DevicePath represents the device path where the volume should be available + optional string devicePath = 2; +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +message AvoidPods { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + repeated PreferAvoidPodsEntry preferAvoidPods = 1; +} + +// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +message AzureDiskVolumeSource { + // The Name of the data disk in the blob storage + optional string diskName = 1; + + // The URI the data disk in the blob storage + optional string diskURI = 2; + + // Host Caching mode: None, Read Only, Read Write. + // +optional + optional string cachingMode = 3; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 4; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 5; + + // Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + optional string kind = 6; +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +message AzureFileVolumeSource { + // the name of secret that contains Azure Storage Account Name and Key + optional string secretName = 1; + + // Share Name + optional string shareName = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 3; +} + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. +message Binding { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The target object that you want to bind to the standard object. + optional ObjectReference target = 2; +} + +// Adds and removes POSIX capabilities from running containers. +message Capabilities { + // Added capabilities + // +optional + repeated string add = 1; + + // Removed capabilities + // +optional + repeated string drop = 2; +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +message CephFSVolumeSource { + // Required: Monitors is a collection of Ceph monitors + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + repeated string monitors = 1; + + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + optional string path = 2; + + // Optional: User is the rados user name, default is admin + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional string user = 3; + + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional string secretFile = 4; + + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional LocalObjectReference secretRef = 5; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + optional bool readOnly = 6; +} + +// Represents a cinder volume resource in Openstack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +message CinderVolumeSource { + // volume id used to identify the volume in cinder + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + optional string volumeID = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional string fsType = 2; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional bool readOnly = 3; +} + +// Information about the condition of a component. +message ComponentCondition { + // Type of condition for a component. + // Valid value: "Healthy" + optional string type = 1; + + // Status of the condition for a component. + // Valid values for "Healthy": "True", "False", or "Unknown". + optional string status = 2; + + // Message about the condition for a component. + // For example, information about a health check. + // +optional + optional string message = 3; + + // Condition error code for a component. + // For example, a health check error code. 
+ // +optional
+ optional string error = 4;
+}
+
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+message ComponentStatus {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // List of component conditions observed
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated ComponentCondition conditions = 2;
+}
+
+// Status of all the conditions for the component as a list of ComponentStatus objects.
+message ComponentStatusList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of ComponentStatus objects.
+ repeated ComponentStatus items = 2;
+}
+
+// ConfigMap holds configuration data for pods to consume.
+message ConfigMap {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Data contains the configuration data.
+ // Each key must consist of alphanumeric characters, '-', '_' or '.'.
+ // +optional
+ map<string, string> data = 2;
+}
+
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
+message ConfigMapEnvSource {
+ // The ConfigMap to select from.
+ optional LocalObjectReference localObjectReference = 1;
+
+ // Specify whether the ConfigMap must be defined
+ // +optional
+ optional bool optional = 2;
+}
+
+// Selects a key from a ConfigMap.
+message ConfigMapKeySelector {
+ // The ConfigMap to select from.
+ optional LocalObjectReference localObjectReference = 1;
+
+ // The key to select.
+ optional string key = 2;
+
+ // Specify whether the ConfigMap or its key must be defined
+ // +optional
+ optional bool optional = 3;
+}
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+message ConfigMapList {
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of ConfigMaps.
+ repeated ConfigMap items = 2;
+}
+
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+message ConfigMapProjection {
+ optional LocalObjectReference localObjectReference = 1;
+
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional + repeated KeyToPath items = 2; + + // Specify whether the ConfigMap or it's keys must be defined + // +optional + optional bool optional = 4; +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +message ConfigMapVolumeSource { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 3; + + // Specify whether the ConfigMap or it's keys must be defined + // +optional + optional bool optional = 4; +} + +// A single application container that you want to run within a pod. +message Container { + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + optional string name = 1; + + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + optional string image = 2; + + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string command = 3; + + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string args = 4; + + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. 
+ // Cannot be updated. + // +optional + optional string workingDir = 5; + + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + repeated ContainerPort ports = 6; + + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + repeated EnvFromSource envFrom = 19; + + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EnvVar env = 7; + + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + // +optional + optional ResourceRequirements resources = 8; + + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + repeated VolumeMount volumeMounts = 9; + + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional Probe livenessProbe = 10; + + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional Probe readinessProbe = 11; + + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + optional Lifecycle lifecycle = 12; + + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + optional string terminationMessagePath = 13; + + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + optional string terminationMessagePolicy = 20; + + // Image pull policy. 
+ // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + optional string imagePullPolicy = 14; + + // Security options the pod should run with. + // More info: https://kubernetes.io/docs/concepts/policy/security-context/ + // More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md + // +optional + optional SecurityContext securityContext = 15; + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + optional bool stdin = 16; + + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + optional bool stdinOnce = 17; + + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + optional bool tty = 18; +} + +// Describe a container image +message ContainerImage { + // Names by which this image is known. + // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + repeated string names = 1; + + // The size of the image in bytes. + // +optional + optional int64 sizeBytes = 2; +} + +// ContainerPort represents a network port in a single container. +message ContainerPort { + // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + // named port in a pod must have a unique name. Name for the port that can be + // referred to by services. + // +optional + optional string name = 1; + + // Number of port to expose on the host. + // If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // Most containers do not need this. + // +optional + optional int32 hostPort = 2; + + // Number of port to expose on the pod's IP address. + // This must be a valid port number, 0 < x < 65536. + optional int32 containerPort = 3; + + // Protocol for port. Must be UDP or TCP. + // Defaults to "TCP". + // +optional + optional string protocol = 4; + + // What host IP to bind the external port to. + // +optional + optional string hostIP = 5; +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +message ContainerState { + // Details about a waiting container + // +optional + optional ContainerStateWaiting waiting = 1; + + // Details about a running container + // +optional + optional ContainerStateRunning running = 2; + + // Details about a terminated container + // +optional + optional ContainerStateTerminated terminated = 3; +} + +// ContainerStateRunning is a running state of a container. 
+message ContainerStateRunning {
+ // Time at which the container was last (re-)started
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1;
+}
+
+// ContainerStateTerminated is a terminated state of a container.
+message ContainerStateTerminated {
+ // Exit status from the last termination of the container
+ optional int32 exitCode = 1;
+
+ // Signal from the last termination of the container
+ // +optional
+ optional int32 signal = 2;
+
+ // (brief) reason from the last termination of the container
+ // +optional
+ optional string reason = 3;
+
+ // Message regarding the last termination of the container
+ // +optional
+ optional string message = 4;
+
+ // Time at which previous execution of the container started
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5;
+
+ // Time at which the container last terminated
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6;
+
+ // Container's ID in the format 'docker://<container_id>'
+ // +optional
+ optional string containerID = 7;
+}
+
+// ContainerStateWaiting is a waiting state of a container.
+message ContainerStateWaiting {
+ // (brief) reason the container is not yet running.
+ // +optional
+ optional string reason = 1;
+
+ // Message regarding why the container is not yet running.
+ // +optional
+ optional string message = 2;
+}
+
+// ContainerStatus contains details for the current status of this container.
+message ContainerStatus {
+ // This must be a DNS_LABEL. Each container in a pod must have a unique name.
+ // Cannot be updated.
+ optional string name = 1;
+
+ // Details about the container's current condition.
+ // +optional
+ optional ContainerState state = 2;
+
+ // Details about the container's last termination condition.
+ // +optional
+ optional ContainerState lastState = 3;
+
+ // Specifies whether the container has passed its readiness probe.
+ optional bool ready = 4;
+
+ // The number of times the container has been restarted, currently based on
+ // the number of dead containers that have not yet been removed.
+ // Note that this is calculated from dead containers. But those containers are subject to
+ // garbage collection. This value will get capped at 5 by GC.
+ optional int32 restartCount = 5;
+
+ // The image the container is running.
+ // More info: https://kubernetes.io/docs/concepts/containers/images
+ // TODO(dchen1107): Which image the container is running with?
+ optional string image = 6;
+
+ // ImageID of the container's image.
+ optional string imageID = 7;
+
+ // Container's ID in the format 'docker://<container_id>'.
+ // +optional
+ optional string containerID = 8;
+}
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+message DaemonEndpoint {
+ // Port number of the given endpoint.
+ optional int32 Port = 1;
+}
+
+// DeleteOptions may be provided when deleting an API object
+// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
+// +k8s:openapi-gen=false
+message DeleteOptions {
+ // The duration in seconds before the object should be deleted. Value must be non-negative integer.
+ // The value zero indicates delete immediately. If this value is nil, the default grace period for the
+ // specified type will be used.
+ // Defaults to a per object value if not specified. zero means delete immediately.
+ // +optional
+ optional int64 gracePeriodSeconds = 1;
+
+ // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+ // returned.
+ // +optional + optional Preconditions preconditions = 2; + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + optional bool orphanDependents = 3; + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + optional string propagationPolicy = 4; +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +message DownwardAPIProjection { + // Items is a list of DownwardAPIVolume file + // +optional + repeated DownwardAPIVolumeFile items = 1; +} + +// DownwardAPIVolumeFile represents information to create the file containing the pod field +message DownwardAPIVolumeFile { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + optional string path = 1; + + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + optional ObjectFieldSelector fieldRef = 2; + + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + optional ResourceFieldSelector resourceFieldRef = 3; + + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 mode = 4; +} + +// DownwardAPIVolumeSource represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +message DownwardAPIVolumeSource { + // Items is a list of downward API volume file + // +optional + repeated DownwardAPIVolumeFile items = 1; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 2; +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +message EmptyDirVolumeSource { + // What type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // Must be an empty string (default) or Memory. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + optional string medium = 1; + + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. 
+ // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; +} + +// EndpointAddress is a tuple that describes single IP address. +message EndpointAddress { + // The IP of this endpoint. + // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + // or link-local multicast ((224.0.0.0/24). + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, See #4447. + optional string ip = 1; + + // The Hostname of this endpoint + // +optional + optional string hostname = 3; + + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. + // +optional + optional string nodeName = 4; + + // Reference to object providing the endpoint. + // +optional + optional ObjectReference targetRef = 2; +} + +// EndpointPort is a tuple that describes a single port. +message EndpointPort { + // The name of this port (corresponds to ServicePort.Name). + // Must be a DNS_LABEL. + // Optional only if one port is defined. + // +optional + optional string name = 1; + + // The port number of the endpoint. + optional int32 port = 2; + + // The IP protocol for this port. + // Must be UDP or TCP. + // Default is TCP. + // +optional + optional string protocol = 3; +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +message EndpointSubset { + // IP addresses which offer the related ports that are marked as ready. These endpoints + // should be considered safe for load balancers and clients to utilize. + // +optional + repeated EndpointAddress addresses = 1; + + // IP addresses which offer the related ports but are not currently marked as ready + // because they have not yet finished starting, have recently failed a readiness check, + // or have recently failed a liveness check. + // +optional + repeated EndpointAddress notReadyAddresses = 2; + + // Port numbers available on the related IP addresses. + // +optional + repeated EndpointPort ports = 3; +} + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +message Endpoints { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The set of all endpoints is the union of all subsets. Addresses are placed into + // subsets according to the IPs they share. 
A single address with multiple ports, + // some of which are ready and some of which are not (because they come from + // different containers) will result in the address being displayed in different + // subsets for the different ports. No address will appear in both Addresses and + // NotReadyAddresses in the same subset. + // Sets of addresses and ports that comprise a service. + repeated EndpointSubset subsets = 2; +} + +// EndpointsList is a list of endpoints. +message EndpointsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of endpoints. + repeated Endpoints items = 2; +} + +// EnvFromSource represents the source of a set of ConfigMaps +message EnvFromSource { + // An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + optional string prefix = 1; + + // The ConfigMap to select from + // +optional + optional ConfigMapEnvSource configMapRef = 2; + + // The Secret to select from + // +optional + optional SecretEnvSource secretRef = 3; +} + +// EnvVar represents an environment variable present in a Container. +message EnvVar { + // Name of the environment variable. Must be a C_IDENTIFIER. + optional string name = 1; + + // Variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // Defaults to "". + // +optional + optional string value = 2; + + // Source for the environment variable's value. Cannot be used if value is not empty. + // +optional + optional EnvVarSource valueFrom = 3; +} + +// EnvVarSource represents a source for the value of an EnvVar. +message EnvVarSource { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. + // +optional + optional ObjectFieldSelector fieldRef = 1; + + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + optional ResourceFieldSelector resourceFieldRef = 2; + + // Selects a key of a ConfigMap. + // +optional + optional ConfigMapKeySelector configMapKeyRef = 3; + + // Selects a key of a secret in the pod's namespace + // +optional + optional SecretKeySelector secretKeyRef = 4; +} + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +message Event { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The object that this event is about. + optional ObjectReference involvedObject = 2; + + // This should be a short, machine understandable string that gives the reason + // for the transition into the object's current status. + // TODO: provide exact specification for format. 
+ // +optional + optional string reason = 3; + + // A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + optional string message = 4; + + // The component reporting this event. Should be a short machine understandable string. + // +optional + optional EventSource source = 5; + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; + + // The time at which the most recent occurrence of this event was recorded. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; + + // The number of times this event has occurred. + // +optional + optional int32 count = 8; + + // Type of this event (Normal, Warning), new types could be added in the future + // +optional + optional string type = 9; +} + +// EventList is a list of events. +message EventList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of events + repeated Event items = 2; +} + +// EventSource contains information for an event. +message EventSource { + // Component from which the event is generated. + // +optional + optional string component = 1; + + // Node name on which the event is generated. + // +optional + optional string host = 2; +} + +// ExecAction describes a "run in container" action. +message ExecAction { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + // +optional + repeated string command = 1; +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +message FCVolumeSource { + // Required: FC target worldwide names (WWNs) + repeated string targetWWNs = 1; + + // Required: FC target lun number + optional int32 lun = 2; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 3; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +message FlexVolumeSource { + // Driver is the name of the driver to use for this volume. + optional string driver = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + optional string fsType = 2; + + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. 
This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
+ // +optional
+ optional LocalObjectReference secretRef = 3;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ optional bool readOnly = 4;
+
+ // Optional: Extra command options if any.
+ // +optional
+ map<string, string> options = 5;
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+message FlockerVolumeSource {
+ // Name of the dataset stored as metadata -> name on the dataset for Flocker
+ // should be considered as deprecated
+ // +optional
+ optional string datasetName = 1;
+
+ // UUID of the dataset. This is the unique identifier of a Flocker dataset
+ // +optional
+ optional string datasetUUID = 2;
+}
+
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
+message GCEPersistentDiskVolumeSource {
+ // Unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ optional string pdName = 1;
+
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ optional string fsType = 2;
+
+ // The partition in the volume that you want to mount.
+ // If omitted, the default is to mount by volume name.
+ // Examples: For volume /dev/sda1, you specify the partition as "1".
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ // +optional
+ optional int32 partition = 3;
+
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ // +optional
+ optional bool readOnly = 4;
+}
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+message GitRepoVolumeSource {
+ // Repository URL
+ optional string repository = 1;
+
+ // Commit hash for the specified revision.
+ // +optional
+ optional string revision = 2;
+
+ // Target directory name.
+ // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ // git repository. Otherwise, if specified, the volume will contain the git repository in
+ // the subdirectory with the given name.
+ // +optional
+ optional string directory = 3;
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+message GlusterfsVolumeSource { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + optional string endpoints = 1; + + // Path is the Glusterfs volume path. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + optional string path = 2; + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + optional bool readOnly = 3; +} + +// HTTPGetAction describes an action based on HTTP Get requests. +message HTTPGetAction { + // Path to access on the HTTP server. + // +optional + optional string path = 1; + + // Name or number of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + + // Host name to connect to, defaults to the pod IP. You probably want to set + // "Host" in httpHeaders instead. + // +optional + optional string host = 3; + + // Scheme to use for connecting to the host. + // Defaults to HTTP. + // +optional + optional string scheme = 4; + + // Custom headers to set in the request. HTTP allows repeated headers. + // +optional + repeated HTTPHeader httpHeaders = 5; +} + +// HTTPHeader describes a custom header to be used in HTTP probes +message HTTPHeader { + // The header field name + optional string name = 1; + + // The header field value + optional string value = 2; +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +message Handler { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + optional ExecAction exec = 1; + + // HTTPGet specifies the http request to perform. + // +optional + optional HTTPGetAction httpGet = 2; + + // TCPSocket specifies an action involving a TCP port. + // TCP hooks not yet supported + // TODO: implement a realistic TCP lifecycle hook + // +optional + optional TCPSocketAction tcpSocket = 3; +} + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +message HostAlias { + // IP address of the host file entry. + optional string ip = 1; + + // Hostnames for the above IP address. + repeated string hostnames = 2; +} + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +message HostPathVolumeSource { + // Path of the directory on the host. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + optional string path = 1; +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +message ISCSIVolumeSource { + // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + optional string targetPortal = 1; + + // Target iSCSI Qualified Name. + optional string iqn = 2; + + // iSCSI target lun number. + optional int32 lun = 3; + + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. 
+ // +optional + optional string iscsiInterface = 4; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 5; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + optional bool readOnly = 6; + + // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + repeated string portals = 7; + + // whether support iSCSI Discovery CHAP authentication + // +optional + optional bool chapAuthDiscovery = 8; + + // whether support iSCSI Session CHAP authentication + // +optional + optional bool chapAuthSession = 11; + + // CHAP secret for iSCSI target and initiator authentication + // +optional + optional LocalObjectReference secretRef = 10; +} + +// Maps a string key to a path within a volume. +message KeyToPath { + // The key to project. + optional string key = 1; + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. + optional string path = 2; + + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 mode = 3; +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +message Lifecycle { + // PostStart is called immediately after a container is created. If the handler fails, + // the container is terminated and restarted according to its restart policy. + // Other management of the container blocks until the hook completes. + // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + // +optional + optional Handler postStart = 1; + + // PreStop is called immediately before a container is terminated. + // The container is terminated after the handler completes. + // The reason for termination is passed to the handler. + // Regardless of the outcome of the handler, the container is eventually terminated. + // Other management of the container blocks until the hook completes. + // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + // +optional + optional Handler preStop = 2; +} + +// LimitRange sets resource usage limits for each kind of resource in a Namespace. +message LimitRange { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the limits enforced. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+ // +optional
+ optional LimitRangeSpec spec = 2;
+}
+
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
+message LimitRangeItem {
+ // Type of resource that this limit applies to.
+ // +optional
+ optional string type = 1;
+
+ // Max usage constraints on this kind by resource name.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2;
+
+ // Min usage constraints on this kind by resource name.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3;
+
+ // Default resource requirement limit value by resource name if resource limit is omitted.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4;
+
+ // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5;
+
+ // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
+}
+
+// LimitRangeList is a list of LimitRange items.
+message LimitRangeList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of LimitRange objects.
+ // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md
+ repeated LimitRange items = 2;
+}
+
+// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
+message LimitRangeSpec {
+ // Limits is the list of LimitRangeItem objects that are enforced.
+ repeated LimitRangeItem limits = 1;
+}
+
+// List holds a list of objects, which may not be known by the server.
+message List {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of objects
+ repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
+}
+
+// ListOptions is the query options to a standard REST list call.
+// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
+// +k8s:openapi-gen=false
+message ListOptions {
+ // A selector to restrict the list of returned objects by their labels.
+ // Defaults to everything.
+ // +optional
+ optional string labelSelector = 1;
+
+ // A selector to restrict the list of returned objects by their fields.
+ // Defaults to everything.
+ // +optional
+ optional string fieldSelector = 2;
+
+ // If true, partially initialized resources are included in the response.
+ // +optional
+ optional bool includeUninitialized = 6;
+
+ // Watch for changes to the described resources and return them as a stream of
+ // add, update, and remove notifications. Specify resourceVersion.
+ // +optional
+ optional bool watch = 3;
+
+ // When specified with a watch call, shows changes that occur after that particular version of a resource.
+ // Defaults to changes from the beginning of history.
+ // When specified for list:
+ // - if unset, then the result is returned from remote storage based on quorum-read flag;
+ // - if it's 0, then we simply return what we currently have in cache, no guarantee;
+ // - if set to non zero, then the result is at least as fresh as given rv.
+ // +optional + optional string resourceVersion = 4; + + // Timeout for the list/watch call. + // +optional + optional int64 timeoutSeconds = 5; +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +message LoadBalancerIngress { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + optional string ip = 1; + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + optional string hostname = 2; +} + +// LoadBalancerStatus represents the status of a load-balancer. +message LoadBalancerStatus { + // Ingress is a list containing ingress points for the load-balancer. + // Traffic intended for the service should be sent to these ingress points. + // +optional + repeated LoadBalancerIngress ingress = 1; +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +message LocalObjectReference { + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // TODO: Add other useful fields. apiVersion, kind, uid? + // +optional + optional string name = 1; +} + +// Local represents directly-attached storage with node affinity +message LocalVolumeSource { + // The full path to the volume on the node + // For alpha, this path must be a directory + // Once block as a source is supported, then this path can point to a block device + optional string path = 1; +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +message NFSVolumeSource { + // Server is the hostname or IP address of the NFS server. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + optional string server = 1; + + // Path that is exported by the NFS server. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + optional string path = 2; + + // ReadOnly here will force + // the NFS export to be mounted with read-only permissions. + // Defaults to false. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + optional bool readOnly = 3; +} + +// Namespace provides a scope for Names. +// Use of multiple namespaces is optional. +message Namespace { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of the Namespace. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional NamespaceSpec spec = 2; + + // Status describes the current status of a Namespace. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional NamespaceStatus status = 3; +} + +// NamespaceList is a list of Namespaces. +message NamespaceList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Namespace objects in the list. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + repeated Namespace items = 2; +} + +// NamespaceSpec describes the attributes on a Namespace. +message NamespaceSpec { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. + // More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers + // +optional + repeated string finalizers = 1; +} + +// NamespaceStatus is information about the current status of a Namespace. +message NamespaceStatus { + // Phase is the current lifecycle phase of the namespace. + // More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases + // +optional + optional string phase = 1; +} + +// Node is a worker node in Kubernetes. +// Each node will have a unique identifier in the cache (i.e. in etcd). +message Node { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of a node. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional NodeSpec spec = 2; + + // Most recently observed status of the node. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional NodeStatus status = 3; +} + +// NodeAddress contains information for the node's address. +message NodeAddress { + // Node address type, one of Hostname, ExternalIP or InternalIP. + optional string type = 1; + + // The node address. + optional string address = 2; +} + +// Node affinity is a group of node affinity scheduling rules. +message NodeAffinity { + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// NodeCondition contains condition information for a node. +message NodeCondition { + // Type of node condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time we got an update on a given condition. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; + + // Last time the condition transit from one status to another. 
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+ // (brief) reason for the condition's last transition.
+ // +optional
+ optional string reason = 5;
+
+ // Human readable message indicating details about last transition.
+ // +optional
+ optional string message = 6;
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+message NodeDaemonEndpoints {
+ // Endpoint on which Kubelet is listening.
+ // +optional
+ optional DaemonEndpoint kubeletEndpoint = 1;
+}
+
+// NodeList is the whole list of all Nodes which have been registered with master.
+message NodeList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of nodes
+ repeated Node items = 2;
+}
+
+// NodeProxyOptions is the query options to a Node's proxy call.
+message NodeProxyOptions {
+ // Path is the URL path to use for the current proxy request to node.
+ // +optional
+ optional string path = 1;
+}
+
+// NodeResources is an object for conveying resource information about a node.
+// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
+message NodeResources {
+ // Capacity represents the available resources of a node
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+}
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+message NodeSelector {
+ // Required. A list of node selector terms. The terms are ORed.
+ repeated NodeSelectorTerm nodeSelectorTerms = 1;
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+message NodeSelectorRequirement {
+ // The label key that the selector applies to.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ optional string key = 1;
+
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ optional string operator = 2;
+
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. If the operator is Gt or Lt, the values
+ // array must have a single element, which will be interpreted as an integer.
+ // This array is replaced during a strategic merge patch.
+ // +optional
+ repeated string values = 3;
+}
+
+// A null or empty node selector term matches no objects.
+message NodeSelectorTerm {
+ // Required. A list of node selector requirements. The requirements are ANDed.
+ repeated NodeSelectorRequirement matchExpressions = 1;
+}
+
+// NodeSpec describes the attributes that a node is created with.
+message NodeSpec {
+ // PodCIDR represents the pod IP range assigned to the node.
+ // +optional
+ optional string podCIDR = 1;
+
+ // External ID of the node assigned by some machine database (e.g. a cloud provider).
+ // Deprecated.
+ // +optional
+ optional string externalID = 2;
+
+ // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
+ // +optional
+ optional string providerID = 3;
+
+ // Unschedulable controls node schedulability of new pods. By default, node is schedulable.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
+ // +optional
+ optional bool unschedulable = 4;
+
+ // If specified, the node's taints.
+ // +optional
+ repeated Taint taints = 5;
+}
+
+// NodeStatus is information about the current status of a node.
+message NodeStatus {
+ // Capacity represents the total resources of a node.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+
+ // Allocatable represents the resources of a node that are available for scheduling.
+ // Defaults to Capacity.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2;
+
+ // NodePhase is the recently observed lifecycle phase of the node.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
+ // The field is never populated, and now is deprecated.
+ // +optional
+ optional string phase = 3;
+
+ // Conditions is an array of current observed node conditions.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated NodeCondition conditions = 4;
+
+ // List of addresses reachable to the node.
+ // Queried from cloud provider, if available.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated NodeAddress addresses = 5;
+
+ // Endpoints of daemons running on the Node.
+ // +optional
+ optional NodeDaemonEndpoints daemonEndpoints = 6;
+
+ // Set of ids/uuids to uniquely identify the node.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#info
+ // +optional
+ optional NodeSystemInfo nodeInfo = 7;
+
+ // List of container images on this node
+ // +optional
+ repeated ContainerImage images = 8;
+
+ // List of attachable volumes in use (mounted) by the node.
+ // +optional
+ repeated string volumesInUse = 9;
+
+ // List of volumes that are attached to the node.
+ // +optional
+ repeated AttachedVolume volumesAttached = 10;
+}
+
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
+message NodeSystemInfo {
+ // MachineID reported by the node. For unique machine identification
+ // in the cluster this field is preferred. Learn more from man(5)
+ // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
+ optional string machineID = 1;
+
+ // SystemUUID reported by the node. For unique machine identification
+ // MachineID is preferred. This field is specific to Red Hat hosts
+ // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
+ optional string systemUUID = 2;
+
+ // Boot ID reported by the node.
+ optional string bootID = 3;
+
+ // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
+ optional string kernelVersion = 4;
+
+ // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
+ optional string osImage = 5;
+
+ // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
+ optional string containerRuntimeVersion = 6;
+
+ // Kubelet Version reported by the node.
+ optional string kubeletVersion = 7;
+
+ // KubeProxy Version reported by the node.
+ optional string kubeProxyVersion = 8; + + // The Operating System reported by the node + optional string operatingSystem = 9; + + // The Architecture reported by the node + optional string architecture = 10; +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +message ObjectFieldSelector { + // Version of the schema the FieldPath is written in terms of, defaults to "v1". + // +optional + optional string apiVersion = 1; + + // Path of the field to select in the specified API version. + optional string fieldPath = 2; +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +// +k8s:openapi-gen=false +message ObjectMeta { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + optional string name = 1; + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // +optional + optional string generateName = 2; + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +optional + optional string namespace = 3; + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + optional string selfLink = 4; + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + // +optional + optional string uid = 5; + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. 
May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + optional int64 generation = 7; + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time creationTimestamp = 8; + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deletionTimestamp = 9; + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + optional int64 deletionGracePeriodSeconds = 10; + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // +optional + map labels = 11; + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + // +optional + map annotations = 12; + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + repeated k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReferences = 13; + + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Initializers initializers = 16; + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + // +patchStrategy=merge + repeated string finalizers = 14; + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + optional string clusterName = 15; +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +message ObjectReference { + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 1; + + // Namespace of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +optional + optional string namespace = 2; + + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + optional string name = 3; + + // UID of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + // +optional + optional string uid = 4; + + // API version of the referent. + // +optional + optional string apiVersion = 5; + + // Specific resourceVersion to which this reference is made, if any. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // If referring to a piece of an object instead of an entire object, this string + // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ // For example, if the object reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + optional string fieldPath = 7; +} + +// PersistentVolume (PV) is a storage resource provisioned by an administrator. +// It is analogous to a node. +// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes +message PersistentVolume { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines a specification of a persistent volume owned by the cluster. + // Provisioned by an administrator. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes + // +optional + optional PersistentVolumeSpec spec = 2; + + // Status represents the current information/status for the persistent volume. + // Populated by the system. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes + // +optional + optional PersistentVolumeStatus status = 3; +} + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +message PersistentVolumeClaim { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired characteristics of a volume requested by a pod author. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimSpec spec = 2; + + // Status represents the current information/status of a persistent volume claim. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimStatus status = 3; +} + +// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. +message PersistentVolumeClaimList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of persistent volume claims. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + repeated PersistentVolumeClaim items = 2; +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +message PersistentVolumeClaimSpec { + // AccessModes contains the desired access modes the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + // +optional + repeated string accessModes = 1; + + // A label query over volumes to consider for binding. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + + // Resources represents the minimum resources the volume should have. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + // +optional + optional ResourceRequirements resources = 2; + + // VolumeName is the binding reference to the PersistentVolume backing this claim. + // +optional + optional string volumeName = 3; + + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + // +optional + optional string storageClassName = 5; +} + +// PersistentVolumeClaimStatus is the current status of a persistent volume claim. +message PersistentVolumeClaimStatus { + // Phase represents the current phase of PersistentVolumeClaim. + // +optional + optional string phase = 1; + + // AccessModes contains the actual access modes the volume backing the PVC has. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + // +optional + repeated string accessModes = 2; + + // Represents the actual resources of the underlying volume. + // +optional + map capacity = 3; +} + +// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. +// This volume finds the bound PV and mounts that volume for the pod. A +// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another +// type of volume that is owned by someone else (the system). +message PersistentVolumeClaimVolumeSource { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + optional string claimName = 1; + + // Will force the ReadOnly setting in VolumeMounts. + // Default false. + // +optional + optional bool readOnly = 2; +} + +// PersistentVolumeList is a list of PersistentVolume items. +message PersistentVolumeList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of persistent volumes. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + repeated PersistentVolume items = 2; +} + +// PersistentVolumeSource is similar to VolumeSource but meant for the +// administrator who creates PVs. Exactly one of its members must be set. +message PersistentVolumeSource { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; + + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; + + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // +optional + optional HostPathVolumeSource hostPath = 3; + + // Glusterfs represents a Glusterfs volume that is attached to a host and + // exposed to the pod. 
Provisioned by an admin. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + optional GlusterfsVolumeSource glusterfs = 4; + + // NFS represents an NFS mount on the host. Provisioned by an admin. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + optional NFSVolumeSource nfs = 5; + + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + optional RBDVolumeSource rbd = 6; + + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // +optional + optional ISCSIVolumeSource iscsi = 7; + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional CinderVolumeSource cinder = 8; + + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + optional CephFSVolumeSource cephfs = 9; + + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + optional FCVolumeSource fc = 10; + + // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // +optional + optional FlockerVolumeSource flocker = 11; + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. + // +optional + optional FlexVolumeSource flexVolume = 12; + + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + optional AzureFileVolumeSource azureFile = 13; + + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + optional QuobyteVolumeSource quobyte = 15; + + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + optional AzureDiskVolumeSource azureDisk = 16; + + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17; + + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + optional PortworxVolumeSource portworxVolume = 18; + + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + optional ScaleIOVolumeSource scaleIO = 19; + + // Local represents directly-attached storage with node affinity + // +optional + optional LocalVolumeSource local = 20; + + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // +optional + optional StorageOSPersistentVolumeSource storageos = 21; +} + +// PersistentVolumeSpec is the specification of a persistent volume. +message PersistentVolumeSpec { + // A description of the persistent volume's resources and capacity. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // +optional + map capacity = 1; + + // The actual volume backing the persistent volume. + optional PersistentVolumeSource persistentVolumeSource = 2; + + // AccessModes contains all ways the volume can be mounted. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes + // +optional + repeated string accessModes = 3; + + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // Expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding + // +optional + optional ObjectReference claimRef = 4; + + // What happens to a persistent volume when released from its claim. + // Valid options are Retain (default) and Recycle. + // Recycling must be supported by the volume plugin underlying this persistent volume. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming + // +optional + optional string persistentVolumeReclaimPolicy = 5; + + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + optional string storageClassName = 6; +} + +// PersistentVolumeStatus is the current status of a persistent volume. +message PersistentVolumeStatus { + // Phase indicates if a volume is available, bound to a claim, or released by a claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase + // +optional + optional string phase = 1; + + // A human-readable message indicating details about why the volume is in this state. + // +optional + optional string message = 2; + + // Reason is a brief CamelCase string that describes any failure and is meant + // for machine parsing and tidy display in the CLI. + // +optional + optional string reason = 3; +} + +// Represents a Photon Controller persistent disk resource. +message PhotonPersistentDiskVolumeSource { + // ID that identifies Photon Controller persistent disk + optional string pdID = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + optional string fsType = 2; +} + +// Pod is a collection of containers that can run on a host. This resource is created +// by clients and scheduled onto hosts. +message Pod { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the pod. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional PodSpec spec = 2; + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional PodStatus status = 3; +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +message PodAffinity { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. 
+  // If the affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to a pod label update), the
+  // system will try to eventually evict the pod from its node.
+  // When there are multiple elements, the lists of nodes corresponding to each
+  // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+  // +optional
+  // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+  // If the affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to a pod label update), the
+  // system may or may not try to eventually evict the pod from its node.
+  // When there are multiple elements, the lists of nodes corresponding to each
+  // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+  // +optional
+  repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+  // The scheduler will prefer to schedule pods to nodes that satisfy
+  // the affinity expressions specified by this field, but it may choose
+  // a node that violates one or more of the expressions. The node that is
+  // most preferred is the one with the greatest sum of weights, i.e.
+  // for each node that meets all of the scheduling requirements (resource
+  // request, requiredDuringScheduling affinity expressions, etc.),
+  // compute a sum by iterating through the elements of this field and adding
+  // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+  // node(s) with the highest sum are the most preferred.
+  // +optional
+  repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running
+message PodAffinityTerm {
+  // A label query over a set of resources, in this case pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
+
+  // namespaces specifies which namespaces the labelSelector applies to (matches against);
+  // null or empty list means "this pod's namespace"
+  repeated string namespaces = 2;
+
+  // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+  // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+  // whose value of the label with key topologyKey matches that of any node on which any of the
+  // selected pods is running.
+  // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies"
+  // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains);
+  // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.
+ // +optional + optional string topologyKey = 3; +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +message PodAntiAffinity { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// PodAttachOptions is the query options to a Pod's remote attach call. +// --- +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +message PodAttachOptions { + // Stdin if true, redirects the standard input stream of the pod for this call. + // Defaults to false. + // +optional + optional bool stdin = 1; + + // Stdout if true indicates that stdout is to be redirected for the attach call. + // Defaults to true. + // +optional + optional bool stdout = 2; + + // Stderr if true indicates that stderr is to be redirected for the attach call. + // Defaults to true. + // +optional + optional bool stderr = 3; + + // TTY if true indicates that a tty will be allocated for the attach call. + // This is passed through the container runtime so the tty + // is allocated on the worker node by the container runtime. + // Defaults to false. + // +optional + optional bool tty = 4; + + // The container in which to execute the command. + // Defaults to only container if there is only one container in the pod. 
+ // +optional + optional string container = 5; +} + +// PodCondition contains details for the current condition of this pod. +message PodCondition { + // Type is the type of the condition. + // Currently only Ready. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + optional string type = 1; + + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + optional string status = 2; + + // Last time we probed the condition. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human-readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// PodExecOptions is the query options to a Pod's remote exec call. +// --- +// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +message PodExecOptions { + // Redirect the standard input stream of the pod for this call. + // Defaults to false. + // +optional + optional bool stdin = 1; + + // Redirect the standard output stream of the pod for this call. + // Defaults to true. + // +optional + optional bool stdout = 2; + + // Redirect the standard error stream of the pod for this call. + // Defaults to true. + // +optional + optional bool stderr = 3; + + // TTY if true indicates that a tty will be allocated for the exec call. + // Defaults to false. + // +optional + optional bool tty = 4; + + // Container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + optional string container = 5; + + // Command is the remote command to execute. argv array. Not executed within a shell. + repeated string command = 6; +} + +// PodList is a list of Pods. +message PodList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pods. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md + repeated Pod items = 2; +} + +// PodLogOptions is the query options for a Pod's logs REST call. +message PodLogOptions { + // The container for which to stream logs. Defaults to only container if there is one container in the pod. + // +optional + optional string container = 1; + + // Follow the log stream of the pod. Defaults to false. + // +optional + optional bool follow = 2; + + // Return previous terminated container logs. Defaults to false. + // +optional + optional bool previous = 3; + + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + optional int64 sinceSeconds = 4; + + // An RFC3339 timestamp from which to show logs. 
If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + // +optional + optional bool timestamps = 6; + + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + // +optional + optional int64 tailLines = 7; + + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + // +optional + optional int64 limitBytes = 8; +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +// when using WebSockets. +// The `port` query parameter must specify the port or +// ports (comma separated) to forward over. +// Port forwarding over SPDY does not use these options. It requires the port +// to be passed in the `port` header as part of request. +message PodPortForwardOptions { + // List of ports to forward + // Required when using WebSockets + // +optional + repeated int32 ports = 1; +} + +// PodProxyOptions is the query options to a Pod's proxy call. +message PodProxyOptions { + // Path is the URL path to use for the current proxy request to pod. + // +optional + optional string path = 1; +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +message PodSecurityContext { + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + optional SELinuxOptions seLinuxOptions = 1; + + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + optional int64 runAsUser = 2; + + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional bool runAsNonRoot = 3; + + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + repeated int64 supplementalGroups = 4; + + // A special supplemental group that applies to all containers in a pod. 
+ // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + optional int64 fsGroup = 5; +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +message PodSignature { + // Reference to controller whose pods should avoid this node. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; +} + +// PodSpec is a description of a pod. +message PodSpec { + // List of volumes that can be mounted by containers belonging to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated Volume volumes = 1; + + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +patchMergeKey=name + // +patchStrategy=merge + repeated Container initContainers = 20; + + // List of containers belonging to the pod. + // Containers cannot currently be added or removed. + // There must be at least one container in a Pod. + // Cannot be updated. + // +patchMergeKey=name + // +patchStrategy=merge + repeated Container containers = 2; + + // Restart policy for all containers within the pod. + // One of Always, OnFailure, Never. + // Default to Always. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + // +optional + optional string restartPolicy = 3; + + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // +optional + optional int64 terminationGracePeriodSeconds = 4; + + // Optional duration in seconds the pod may be active on the node relative to + // StartTime before the system will actively try to mark it failed and kill associated containers. + // Value must be a positive integer. + // +optional + optional int64 activeDeadlineSeconds = 5; + + // Set DNS policy for containers within the pod. 
+  // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'.
+  // Defaults to "ClusterFirst".
+  // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.
+  // +optional
+  optional string dnsPolicy = 6;
+
+  // NodeSelector is a selector which must be true for the pod to fit on a node.
+  // Selector which must match a node's labels for the pod to be scheduled on that node.
+  // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  // +optional
+  map<string, string> nodeSelector = 7;
+
+  // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+  // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+  // +optional
+  optional string serviceAccountName = 8;
+
+  // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName.
+  // Deprecated: Use serviceAccountName instead.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional string serviceAccount = 9;
+
+  // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+  // +optional
+  optional bool automountServiceAccountToken = 21;
+
+  // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+  // the scheduler simply schedules this pod onto that node, assuming that it fits resource
+  // requirements.
+  // +optional
+  optional string nodeName = 10;
+
+  // Host networking requested for this pod. Use the host's network namespace.
+  // If this option is set, the ports that will be used must be specified.
+  // Default to false.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool hostNetwork = 11;
+
+  // Use the host's pid namespace.
+  // Optional: Default to false.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool hostPID = 12;
+
+  // Use the host's ipc namespace.
+  // Optional: Default to false.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool hostIPC = 13;
+
+  // SecurityContext holds pod-level security attributes and common container settings.
+  // Optional: Defaults to empty. See type description for default values of each field.
+  // +optional
+  optional PodSecurityContext securityContext = 14;
+
+  // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+  // If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+  // in the case of docker, only DockerConfig type secrets are honored.
+  // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated LocalObjectReference imagePullSecrets = 15;
+
+  // Specifies the hostname of the Pod
+  // If not specified, the pod's hostname will be set to a system-defined value.
+  // +optional
+  optional string hostname = 16;
+
+  // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+  // If not specified, the pod will not have a domainname at all.
+  // +optional
+  optional string subdomain = 17;
+
+  // If specified, the pod's scheduling constraints
+  // +optional
+  optional Affinity affinity = 18;
+
+  // If specified, the pod will be dispatched by specified scheduler.
+  // If not specified, the pod will be dispatched by default scheduler.
+  // +optional
+  optional string schedulerName = 19;
+
+  // If specified, the pod's tolerations.
+ // +optional + repeated Toleration tolerations = 22; + + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. + // +optional + // +patchMergeKey=IP + // +patchStrategy=merge + repeated HostAlias hostMappings = 23; +} + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. +message PodStatus { + // Current condition of the pod. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase + // +optional + optional string phase = 1; + + // Current service state of pod. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated PodCondition conditions = 2; + + // A human readable message indicating details about why the pod is in this condition. + // +optional + optional string message = 3; + + // A brief CamelCase message indicating details about why the pod is in this state. + // e.g. 'OutOfDisk' + // +optional + optional string reason = 4; + + // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // +optional + optional string hostIP = 5; + + // IP address allocated to the pod. Routable at least within the cluster. + // Empty if not yet allocated. + // +optional + optional string podIP = 6; + + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + repeated ContainerStatus initContainerStatuses = 10; + + // The list has one entry per container in the manifest. Each entry is currently the output + // of `docker inspect`. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // +optional + repeated ContainerStatus containerStatuses = 8; + + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements + // See PodQOSClass type for available QOS classes + // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // +optional + optional string qosClass = 9; +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +message PodStatusResult { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional PodStatus status = 2; +} + +// PodTemplate describes a template for creating copies of a predefined pod. +message PodTemplate { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Template defines the pods that will be created from this pod template. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional PodTemplateSpec template = 2; +} + +// PodTemplateList is a list of PodTemplates. +message PodTemplateList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pod templates + repeated PodTemplate items = 2; +} + +// PodTemplateSpec describes the data a pod should have when created from a template +message PodTemplateSpec { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the pod. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional PodSpec spec = 2; +} + +// PortworxVolumeSource represents a Portworx volume resource. +message PortworxVolumeSource { + // VolumeID uniquely identifies a Portworx volume + optional string volumeID = 1; + + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + optional string fsType = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 3; +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +// +k8s:openapi-gen=false +message Preconditions { + // Specifies the target UID. + // +optional + optional string uid = 1; +} + +// Describes a class of pods that should avoid this node. +message PreferAvoidPodsEntry { + // The class of pods. + optional PodSignature podSignature = 1; + + // Time at which this entry was added to the list. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; + + // (brief) reason why this entry was added to the list. + // +optional + optional string reason = 3; + + // Human readable message indicating why this entry was added to the list. + // +optional + optional string message = 4; +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +message PreferredSchedulingTerm { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + optional int32 weight = 1; + + // A node selector term, associated with the corresponding weight. + optional NodeSelectorTerm preference = 2; +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +message Probe { + // The action taken to determine the health of a container + optional Handler handler = 1; + + // Number of seconds after the container has started before liveness probes are initiated. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional int32 initialDelaySeconds = 2; + + // Number of seconds after which the probe times out. + // Defaults to 1 second. Minimum value is 1. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional int32 timeoutSeconds = 3; + + // How often (in seconds) to perform the probe. + // Default to 10 seconds. Minimum value is 1. + // +optional + optional int32 periodSeconds = 4; + + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // +optional + optional int32 successThreshold = 5; + + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + optional int32 failureThreshold = 6; +} + +// Represents a projected volume source +message ProjectedVolumeSource { + // list of volume projections + repeated VolumeProjection sources = 1; + + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 2; +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +message QuobyteVolumeSource { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + optional string registry = 1; + + // Volume is a string that references an already created Quobyte volume by name. + optional string volume = 2; + + // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. + // Defaults to false. + // +optional + optional bool readOnly = 3; + + // User to map volume access to + // Defaults to serivceaccount user + // +optional + optional string user = 4; + + // Group to map volume access to + // Default is no group + // +optional + optional string group = 5; +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +message RBDVolumeSource { + // A collection of Ceph monitors. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + repeated string monitors = 1; + + // The rados image name. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + optional string image = 2; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 3; + + // The rados pool name. + // Default is rbd. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string pool = 4; + + // The rados user name. + // Default is admin. 
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string user = 5; + + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string keyring = 6; + + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional LocalObjectReference secretRef = 7; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional bool readOnly = 8; +} + +// RangeAllocation is not a public type. +message RangeAllocation { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Range is string that identifies the range represented by 'data'. + optional string range = 2; + + // Data is a bit array containing all allocated addresses in the previous segment. + optional bytes data = 3; +} + +// ReplicationController represents the configuration of a replication controller. +message ReplicationController { + // If the Labels of a ReplicationController are empty, they are defaulted to + // be the same as the Pod(s) that the replication controller manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the replication controller. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicationControllerSpec spec = 2; + + // Status is the most recently observed status of the replication controller. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicationControllerStatus status = 3; +} + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +message ReplicationControllerCondition { + // Type of replication controller condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicationControllerList is a collection of replication controllers. +message ReplicationControllerList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of replication controllers. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + repeated ReplicationController items = 2; +} + +// ReplicationControllerSpec is the specification of a replication controller. +message ReplicationControllerSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the Replicas count. + // If Selector is empty, it is defaulted to the labels present on the Pod template. + // Label keys and values that must match in order to be controlled by this replication + // controller, if empty defaulted to labels on Pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + map<string, string> selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. This takes precedence over a TemplateRef. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + optional PodTemplateSpec template = 3; +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +message ReplicationControllerStatus { + // Replicas is the most recently observed number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller + optional int32 replicas = 1; + + // The number of pods that have labels matching the labels of the pod template of the replication controller. + // +optional + optional int32 fullyLabeledReplicas = 2; + + // The number of ready replicas for this replication controller. + // +optional + optional int32 readyReplicas = 4; + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + optional int32 availableReplicas = 5; + + // ObservedGeneration reflects the generation of the most recently observed replication controller. + // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replication controller's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated ReplicationControllerCondition conditions = 6; +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +message ResourceFieldSelector { + // Container name: required for volumes, optional for env vars + // +optional + optional string containerName = 1; + + // Required: resource to select + optional string resource = 2; + + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; +} + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +message ResourceQuota { + // Standard object's metadata.
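For illustration only: a sketch of how the replicas pointer in ReplicationControllerSpec distinguishes "unspecified" from an explicit zero, assuming the vendored k8s.io/client-go/pkg/api/v1 and k8s.io/apimachinery/pkg/apis/meta/v1 packages; the labels and image are hypothetical.

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        v1 "k8s.io/client-go/pkg/api/v1"
    )

    // rcSpec scales the controller to three replicas. Leaving Replicas nil means
    // "unspecified" (defaulted to 1), while a pointer to 0 explicitly scales to zero.
    func rcSpec() v1.ReplicationControllerSpec {
        replicas := int32(3)
        return v1.ReplicationControllerSpec{
            Replicas: &replicas,
            Selector: map[string]string{"app": "web"},
            Template: &v1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
                Spec: v1.PodSpec{
                    Containers: []v1.Container{{Name: "web", Image: "nginx:1.13"}},
                },
            },
        }
    }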
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired quota. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ResourceQuotaSpec spec = 2; + + // Status defines the actual enforced quota and its current usage. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ResourceQuotaStatus status = 3; +} + +// ResourceQuotaList is a list of ResourceQuota items. +message ResourceQuotaList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ResourceQuota objects. + // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + repeated ResourceQuota items = 2; +} + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. +message ResourceQuotaSpec { + // Hard is the set of desired hard limits for each named resource. + // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1; + + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. + // +optional + repeated string scopes = 2; +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use. +message ResourceQuotaStatus { + // Hard is the set of enforced hard limits for each named resource. + // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1; + + // Used is the current observed total usage of the resource in the namespace. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> used = 2; +} + +// ResourceRequirements describes the compute resource requirements. +message ResourceRequirements { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1; + + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2; +} + +// SELinuxOptions are the labels to be applied to the container +message SELinuxOptions { + // User is a SELinux user label that applies to the container. + // +optional + optional string user = 1; + + // Role is a SELinux role label that applies to the container. + // +optional + optional string role = 2; + + // Type is a SELinux type label that applies to the container. + // +optional + optional string type = 3; + + // Level is SELinux level label that applies to the container. + // +optional + optional string level = 4; +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +message ScaleIOVolumeSource { + // The host address of the ScaleIO API Gateway. + optional string gateway = 1; + + // The name of the storage system as configured in ScaleIO.
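For illustration only: a sketch of the limits/requests maps above expressed with the generated Go types, assuming the vendored v1 and resource packages; the quantities are hypothetical.

    import (
        "k8s.io/apimachinery/pkg/api/resource"
        v1 "k8s.io/client-go/pkg/api/v1"
    )

    // webResources requests a small guaranteed share and caps the container at
    // half a CPU and 128Mi of memory; both maps are keyed by resource name.
    func webResources() v1.ResourceRequirements {
        return v1.ResourceRequirements{
            Requests: v1.ResourceList{
                v1.ResourceCPU:    resource.MustParse("100m"),
                v1.ResourceMemory: resource.MustParse("64Mi"),
            },
            Limits: v1.ResourceList{
                v1.ResourceCPU:    resource.MustParse("500m"),
                v1.ResourceMemory: resource.MustParse("128Mi"),
            },
        }
    }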
+ optional string system = 2; + + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + optional LocalObjectReference secretRef = 3; + + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + optional bool sslEnabled = 4; + + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + optional string protectionDomain = 5; + + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + optional string storagePool = 6; + + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + optional string storageMode = 7; + + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + optional string volumeName = 8; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 9; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 10; +} + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +message Secret { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 + // +optional + map<string, bytes> data = 2; + + // stringData allows specifying non-binary secret data in string form. + // It is provided as a write-only convenience method. + // All keys and values are merged into the data field on write, overwriting any existing values. + // It is never output when reading from the API. + // +k8s:conversion-gen=false + // +optional + map<string, string> stringData = 4; + + // Used to facilitate programmatic handling of secret data. + // +optional + optional string type = 3; +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +message SecretEnvSource { + // The Secret to select from. + optional LocalObjectReference localObjectReference = 1; + + // Specify whether the Secret must be defined + // +optional + optional bool optional = 2; +} + +// SecretKeySelector selects a key of a Secret. +message SecretKeySelector { + // The name of the secret in the pod's namespace to select from. + optional LocalObjectReference localObjectReference = 1; + + // The key of the secret to select from. Must be a valid secret key. + optional string key = 2; + + // Specify whether the Secret or its key must be defined + // +optional + optional bool optional = 3; +} + +// SecretList is a list of Secret. +message SecretList { + // Standard list metadata.
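For illustration only: a sketch showing data versus stringData on the generated Go Secret type, assuming the vendored v1 and metav1 packages; the secret name and values are hypothetical.

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        v1 "k8s.io/client-go/pkg/api/v1"
    )

    // registryCreds mixes raw bytes (Data) with the write-only StringData
    // convenience field; on write the API server folds StringData into Data.
    func registryCreds() v1.Secret {
        return v1.Secret{
            ObjectMeta: metav1.ObjectMeta{Name: "registry-creds"},
            Type:       v1.SecretTypeOpaque,
            Data:       map[string][]byte{"password": []byte("s3cr3t")},
            StringData: map[string]string{"username": "admin"},
        }
    }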
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of secret objects. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + repeated Secret items = 2; +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +message SecretProjection { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Specify whether the Secret or its key must be defined + // +optional + optional bool optional = 4; +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +message SecretVolumeSource { + // Name of the secret in the pod's namespace to use. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + optional string secretName = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 3; + + // Specify whether the Secret or it's keys must be defined + // +optional + optional bool optional = 4; +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +message SecurityContext { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + optional Capabilities capabilities = 1; + + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. 
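For illustration only: a sketch of projecting selected secret keys into a volume via the items field described above, assuming the vendored v1 package; the names, key and mode are hypothetical.

    import v1 "k8s.io/client-go/pkg/api/v1"

    // secretVolume exposes only the "tls.crt" key of the secret, at a custom
    // relative path and with a restrictive default file mode.
    func secretVolume() v1.Volume {
        mode := int32(0400)
        return v1.Volume{
            Name: "tls",
            VolumeSource: v1.VolumeSource{
                Secret: &v1.SecretVolumeSource{
                    SecretName:  "web-tls",
                    Items:       []v1.KeyToPath{{Key: "tls.crt", Path: "certs/tls.crt"}},
                    DefaultMode: &mode,
                },
            },
        }
    }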
+ // +optional + optional bool privileged = 2; + + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional SELinuxOptions seLinuxOptions = 3; + + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional int64 runAsUser = 4; + + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional bool runAsNonRoot = 5; + + // Whether this container has a read-only root filesystem. + // Default is false. + // +optional + optional bool readOnlyRootFilesystem = 6; +} + +// SerializedReference is a reference to serialized object. +message SerializedReference { + // The reference to an object in the system. + // +optional + optional ObjectReference reference = 1; +} + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +message Service { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of a service. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ServiceSpec spec = 2; + + // Most recently observed status of the service. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ServiceStatus status = 3; +} + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +message ServiceAccount { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated ObjectReference secrets = 2; + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. 
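For illustration only: a sketch of a restrictive container-level SecurityContext built from the fields above, assuming the vendored v1 package; the UID is hypothetical.

    import v1 "k8s.io/client-go/pkg/api/v1"

    // restrictedContext drops all capabilities, forbids privileged mode and a
    // writable root filesystem, and forces a non-root UID; pod-level settings
    // with the same names are overridden by these container-level values.
    func restrictedContext() *v1.SecurityContext {
        no, yes := false, true
        uid := int64(1000)
        return &v1.SecurityContext{
            Capabilities:           &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
            Privileged:             &no,
            RunAsUser:              &uid,
            RunAsNonRoot:           &yes,
            ReadOnlyRootFilesystem: &yes,
        }
    }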
+ // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + // +optional + repeated LocalObjectReference imagePullSecrets = 3; + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + optional bool automountServiceAccountToken = 4; +} + +// ServiceAccountList is a list of ServiceAccount objects +message ServiceAccountList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ServiceAccounts. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + repeated ServiceAccount items = 2; +} + +// ServiceList holds a list of services. +message ServiceList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of services + repeated Service items = 2; +} + +// ServicePort contains information on service's port. +message ServicePort { + // The name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. + // Optional if only one ServicePort is defined on this service. + // +optional + optional string name = 1; + + // The IP protocol for this port. Supports "TCP" and "UDP". + // Default is TCP. + // +optional + optional string protocol = 2; + + // The port that will be exposed by this service. + optional int32 port = 3; + + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; + + // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. + // Usually assigned by the system. If specified, it will be allocated to the service + // if unused or else creation of the service will fail. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + // +optional + optional int32 nodePort = 5; +} + +// ServiceProxyOptions is the query options to a Service's proxy call. +message ServiceProxyOptions { + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + // +optional + optional string path = 1; +} + +// ServiceSpec describes the attributes that a user creates on a service. 
+message ServiceSpec { + // The list of ports that are exposed by this service. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +patchMergeKey=port + // +patchStrategy=merge + repeated ServicePort ports = 1; + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + map<string, string> selector = 2; + + // clusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. "None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +optional + optional string clusterIP = 3; + + // type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types + // +optional + optional string type = 4; + + // externalIPs is a list of IP addresses for which nodes in the cluster + // will also accept traffic for this service. These IPs are not managed by + // Kubernetes. The user is responsible for ensuring that traffic arrives + // at a node with this IP. A common example is external load-balancers + // that are not part of the Kubernetes system. + // +optional + repeated string externalIPs = 5; + + // Supports "ClientIP" and "None". Used to maintain session affinity. + // Enable client IP based session affinity. + // Must be ClientIP or None. + // Defaults to None. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +optional + optional string sessionAffinity = 7; + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature.
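For illustration only: a sketch tying the ports, selector and type fields above together on the generated Go types, assuming the vendored v1, metav1 and intstr packages; the names and ports are hypothetical.

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
        v1 "k8s.io/client-go/pkg/api/v1"
    )

    // webService exposes port 80 on a cluster IP and an auto-allocated node
    // port, forwarding to the named container port "http" on pods labelled app=web.
    func webService() v1.Service {
        return v1.Service{
            ObjectMeta: metav1.ObjectMeta{Name: "web"},
            Spec: v1.ServiceSpec{
                Type:     v1.ServiceTypeNodePort,
                Selector: map[string]string{"app": "web"},
                Ports: []v1.ServicePort{{
                    Name:       "http",
                    Protocol:   v1.ProtocolTCP,
                    Port:       80,
                    TargetPort: intstr.FromString("http"),
                }},
            },
        }
    }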
+ // +optional + optional string loadBalancerIP = 8; + + // If specified and supported by the platform, this will restrict traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature." + // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ + // +optional + repeated string loadBalancerSourceRanges = 9; + + // externalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid DNS name and requires Type to be ExternalName. + // +optional + optional string externalName = 10; + + // externalTrafficPolicy denotes if this Service desires to route external + // traffic to node-local or cluster-wide endpoints. "Local" preserves the + // client source IP and avoids a second hop for LoadBalancer and Nodeport + // type services, but risks potentially imbalanced traffic spreading. + // "Cluster" obscures the client source IP and may cause a second hop to + // another node, but should have good overall load-spreading. + // +optional + optional string externalTrafficPolicy = 11; + + // healthCheckNodePort specifies the healthcheck nodePort for the service. + // If not specified, HealthCheckNodePort is created by the service api + // backend with the allocated nodePort. Will use user-specified nodePort value + // if specified by the client. Only effects when Type is set to LoadBalancer + // and ExternalTrafficPolicy is set to Local. + // +optional + optional int32 healthCheckNodePort = 12; +} + +// ServiceStatus represents the current status of a service. +message ServiceStatus { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + optional LoadBalancerStatus loadBalancer = 1; +} + +// Represents a StorageOS persistent volume resource. +message StorageOSPersistentVolumeSource { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + optional string volumeName = 1; + + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + optional string volumeNamespace = 2; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 3; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; + + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + optional ObjectReference secretRef = 5; +} + +// Represents a StorageOS persistent volume resource. +message StorageOSVolumeSource { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. 
+ optional string volumeName = 1; + + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + optional string volumeNamespace = 2; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 3; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; + + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + optional LocalObjectReference secretRef = 5; +} + +// Sysctl defines a kernel parameter to be set +message Sysctl { + // Name of a property to set + optional string name = 1; + + // Value of a property to set + optional string value = 2; +} + +// TCPSocketAction describes an action based on opening a socket +message TCPSocketAction { + // Number or name of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; + + // Optional: Host name to connect to, defaults to the pod IP. + // +optional + optional string host = 2; +} + +// The node this Taint is attached to has the effect "effect" on +// any pod that does not tolerate the Taint. +message Taint { + // Required. The taint key to be applied to a node. + // +patchMergeKey=key + // +patchStrategy=merge + optional string key = 1; + + // Required. The taint value corresponding to the taint key. + // +optional + optional string value = 2; + + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + optional string effect = 3; + + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; +} + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple <key,value,effect> using the matching operator <operator>. +message Toleration { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. + // +optional + // +patchMergeKey=key + // +patchStrategy=merge + optional string key = 1; + + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + // +optional + optional string operator = 2; + + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + // +optional + optional string value = 3; + + // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + optional string effect = 4; + + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + optional int64 tolerationSeconds = 5; +} + +// Volume represents a named volume in a pod that may be accessed by any container in the pod. +message Volume { + // Volume's name. + // Must be a DNS_LABEL and unique within the pod. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + optional string name = 1; + + // VolumeSource represents the location and type of the mounted volume. + // If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + optional VolumeSource volumeSource = 2; +} + +// VolumeMount describes a mounting of a Volume within a container. +message VolumeMount { + // This must match the Name of a Volume. + optional string name = 1; + + // Mounted read-only if true, read-write otherwise (false or unspecified). + // Defaults to false. + // +optional + optional bool readOnly = 2; + + // Path within the container at which the volume should be mounted. Must + // not contain ':'. + optional string mountPath = 3; + + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + optional string subPath = 4; +} + +// Projection that may be projected along with other supported volume types +message VolumeProjection { + // information about the secret data to project + optional SecretProjection secret = 1; + + // information about the downwardAPI data to project + optional DownwardAPIProjection downwardAPI = 2; + + // information about the configMap data to project + optional ConfigMapProjection configMap = 3; +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +message VolumeSource { + // HostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + optional HostPathVolumeSource hostPath = 1; + + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + optional EmptyDirVolumeSource emptyDir = 2; + + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; + + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. 
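For illustration only: a sketch of the Taint and Toleration messages above as the generated Go types, using the ToleratesTaint helper that this change also vendors in toleration.go further below; the key and value are hypothetical.

    import v1 "k8s.io/client-go/pkg/api/v1"

    // toleratesGPU returns true: key, value and effect all match, and the Equal
    // operator compares the taint value against the toleration value.
    func toleratesGPU() bool {
        taint := v1.Taint{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}
        toleration := v1.Toleration{
            Key:      "dedicated",
            Operator: v1.TolerationOpEqual,
            Value:    "gpu",
            Effect:   v1.TaintEffectNoSchedule,
        }
        return toleration.ToleratesTaint(&taint)
    }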
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; + + // GitRepo represents a git repository at a particular revision. + // +optional + optional GitRepoVolumeSource gitRepo = 5; + + // Secret represents a secret that should populate this volume. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + optional SecretVolumeSource secret = 6; + + // NFS represents an NFS mount on the host that shares a pod's lifetime + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + optional NFSVolumeSource nfs = 7; + + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // +optional + optional ISCSIVolumeSource iscsi = 8; + + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + optional GlusterfsVolumeSource glusterfs = 9; + + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; + + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + optional RBDVolumeSource rbd = 11; + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. + // +optional + optional FlexVolumeSource flexVolume = 12; + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional CinderVolumeSource cinder = 13; + + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + optional CephFSVolumeSource cephfs = 14; + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + optional FlockerVolumeSource flocker = 15; + + // DownwardAPI represents downward API about the pod that should populate this volume + // +optional + optional DownwardAPIVolumeSource downwardAPI = 16; + + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + optional FCVolumeSource fc = 17; + + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + optional AzureFileVolumeSource azureFile = 18; + + // ConfigMap represents a configMap that should populate this volume + // +optional + optional ConfigMapVolumeSource configMap = 19; + + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + optional QuobyteVolumeSource quobyte = 21; + + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
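For illustration only: a sketch pairing a Volume (exactly one VolumeSource member set) with the VolumeMount that a container uses to reference it by name, assuming the vendored v1 package; the name and path are hypothetical.

    import v1 "k8s.io/client-go/pkg/api/v1"

    // scratchVolume declares a pod-scoped emptyDir volume; scratchMount attaches
    // it to a container through the shared name.
    func scratchVolume() (v1.Volume, v1.VolumeMount) {
        vol := v1.Volume{
            Name:         "scratch",
            VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
        }
        mount := v1.VolumeMount{Name: "scratch", MountPath: "/var/scratch"}
        return vol, mount
    }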
+ // +optional + optional AzureDiskVolumeSource azureDisk = 22; + + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23; + + // Items for all in one resources secrets, configmaps, and downward API + optional ProjectedVolumeSource projected = 26; + + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + optional PortworxVolumeSource portworxVolume = 24; + + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + optional ScaleIOVolumeSource scaleIO = 25; + + // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // +optional + optional StorageOSVolumeSource storageos = 27; +} + +// Represents a vSphere volume resource. +message VsphereVirtualDiskVolumeSource { + // Path that identifies vSphere volume vmdk + optional string volumePath = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 2; + + // Storage Policy Based Management (SPBM) profile name. + // +optional + optional string storagePolicyName = 3; + + // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + // +optional + optional string storagePolicyID = 4; +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +message WeightedPodAffinityTerm { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + optional int32 weight = 1; + + // Required. A pod affinity term, associated with the corresponding weight. + optional PodAffinityTerm podAffinityTerm = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/api/v1/meta.go b/vendor/k8s.io/client-go/pkg/api/v1/meta.go new file mode 100644 index 000000000..0e3f5d920 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/meta.go @@ -0,0 +1,108 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func (obj *ObjectMeta) GetObjectMeta() metav1.Object { return obj } + +// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows +// fast, direct access to metadata fields for API objects. 
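For illustration only: a sketch of why these accessors exist, assuming the vendored metav1 package; any API object whose metadata satisfies metav1.Object can be handled generically.

    import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    // qualifiedName works for any object exposing the metav1.Object accessors
    // implemented by the methods in this file.
    func qualifiedName(obj metav1.Object) string {
        return obj.GetNamespace() + "/" + obj.GetName()
    }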
+func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetGeneration() int64 { return meta.Generation } +func (meta *ObjectMeta) SetGeneration(generation int64) { meta.Generation = generation } +func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time { return meta.CreationTimestamp } +func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp metav1.Time) { + meta.CreationTimestamp = creationTimestamp +} +func (meta *ObjectMeta) GetDeletionTimestamp() *metav1.Time { return meta.DeletionTimestamp } +func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *metav1.Time) { + meta.DeletionTimestamp = deletionTimestamp +} +func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 { return meta.DeletionGracePeriodSeconds } +func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { + meta.DeletionGracePeriodSeconds = deletionGracePeriodSeconds +} +func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } +func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } +func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } +func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetInitializers() *metav1.Initializers { return meta.Initializers } +func (meta *ObjectMeta) SetInitializers(initializers *metav1.Initializers) { + meta.Initializers = initializers +} +func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } +func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } + +func (meta *ObjectMeta) GetOwnerReferences() []metav1.OwnerReference { + ret := make([]metav1.OwnerReference, len(meta.OwnerReferences)) + for i := 0; i < len(meta.OwnerReferences); i++ { + ret[i].Kind = meta.OwnerReferences[i].Kind + ret[i].Name = meta.OwnerReferences[i].Name + ret[i].UID = meta.OwnerReferences[i].UID + ret[i].APIVersion = meta.OwnerReferences[i].APIVersion + if meta.OwnerReferences[i].Controller != nil { + value := *meta.OwnerReferences[i].Controller + ret[i].Controller = &value + } + if meta.OwnerReferences[i].BlockOwnerDeletion != nil { + value := *meta.OwnerReferences[i].BlockOwnerDeletion + ret[i].BlockOwnerDeletion = &value + } + } + return ret +} + +func (meta *ObjectMeta) SetOwnerReferences(references []metav1.OwnerReference) { + newReferences := make([]metav1.OwnerReference, len(references)) + for i := 0; i < len(references); i++ { + newReferences[i].Kind = references[i].Kind + newReferences[i].Name = references[i].Name + newReferences[i].UID = references[i].UID + 
newReferences[i].APIVersion = references[i].APIVersion + if references[i].Controller != nil { + value := *references[i].Controller + newReferences[i].Controller = &value + } + if references[i].BlockOwnerDeletion != nil { + value := *references[i].BlockOwnerDeletion + newReferences[i].BlockOwnerDeletion = &value + } + } + meta.OwnerReferences = newReferences +} + +func (meta *ObjectMeta) GetClusterName() string { + return meta.ClusterName +} +func (meta *ObjectMeta) SetClusterName(clusterName string) { + meta.ClusterName = clusterName +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/objectreference.go b/vendor/k8s.io/client-go/pkg/api/v1/objectreference.go new file mode 100644 index 000000000..ee5335ee8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/objectreference.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// intend only to get a reference to that object. This simplifies the event recording interface. +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/client-go/pkg/api/v1/register.go b/vendor/k8s.io/client-go/pkg/api/v1/register.go new file mode 100644 index 000000000..c70d6ac56 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/register.go @@ -0,0 +1,106 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. 
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs, addConversionFuncs, addFastPathConversionFuncs) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationController{}, + &ReplicationControllerList{}, + &Service{}, + &ServiceProxyOptions{}, + &ServiceList{}, + &Endpoints{}, + &EndpointsList{}, + &Node{}, + &NodeList{}, + &NodeProxyOptions{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &Secret{}, + &SecretList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + ) + + // Add common types + scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/resource.go b/vendor/k8s.io/client-go/pkg/api/v1/resource.go new file mode 100644 index 000000000..0d1c1dccd --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/resource.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +// Returns string version of ResourceName. +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. 
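For illustration only: a sketch of consuming the register.go file added above, assuming the vendored runtime and v1 packages.

    import (
        "k8s.io/apimachinery/pkg/runtime"
        v1 "k8s.io/client-go/pkg/api/v1"
    )

    // newCoreV1Scheme registers the core v1 kinds listed in addKnownTypes, plus
    // the defaulting and conversion functions wired up in init().
    func newCoreV1Scheme() (*runtime.Scheme, error) {
        scheme := runtime.NewScheme()
        if err := v1.AddToScheme(scheme); err != nil {
            return nil, err
        }
        return scheme, nil
    }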
+func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) StorageOverlay() *resource.Quantity { + if val, ok := (*self)[ResourceStorageOverlay]; ok { + return &val + } + return &resource.Quantity{} +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/taint.go b/vendor/k8s.io/client-go/pkg/api/v1/taint.go new file mode 100644 index 000000000..7b606a309 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/taint.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "fmt" + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch *Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/toleration.go b/vendor/k8s.io/client-go/pkg/api/v1/toleration.go new file mode 100644 index 000000000..b203d335b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/toleration.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>, +// if the two tolerations have same <key,effect,operator,value> combination, regard as they match. +// TODO: uniqueness check for tolerations in api validations. +func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} + +// ToleratesTaint checks if the toleration tolerates the taint.
+// The matching follows the rules below: +// (1) Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// (2) If toleration.operator is 'Exists', it means to match all taint values. +// (3) Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. +func (t *Toleration) ToleratesTaint(taint *Taint) bool { + if len(t.Effect) > 0 && t.Effect != taint.Effect { + return false + } + + if len(t.Key) > 0 && t.Key != taint.Key { + return false + } + + // TODO: Use proper defaulting when Toleration becomes a field of PodSpec + switch t.Operator { + // empty operator means Equal + case "", TolerationOpEqual: + return t.Value == taint.Value + case TolerationOpExists: + return true + default: + return false + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go new file mode 100644 index 000000000..dc5ef1f52 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go @@ -0,0 +1,76883 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg3_resource "k8s.io/apimachinery/pkg/api/resource" + pkg2_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg5_runtime "k8s.io/apimachinery/pkg/runtime" + pkg1_types "k8s.io/apimachinery/pkg/types" + pkg4_intstr "k8s.io/apimachinery/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg3_resource.Quantity + var v1 pkg2_v1.Time + var v2 pkg5_runtime.RawExtension + var v3 pkg1_types.UID + var v4 pkg4_intstr.IntOrString + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [16]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.GenerateName != "" + yyq2[2] = x.Namespace != "" + yyq2[3] = x.SelfLink != "" + yyq2[4] = x.UID != "" + yyq2[5] = x.ResourceVersion != "" + yyq2[6] = x.Generation != 0 + yyq2[7] = true + yyq2[8] = x.DeletionTimestamp != nil + yyq2[9] = x.DeletionGracePeriodSeconds != nil + yyq2[10] = len(x.Labels) != 0 + yyq2[11] = len(x.Annotations) != 0 + yyq2[12] = len(x.OwnerReferences) != 0 + yyq2[13] = x.Initializers != nil + yyq2[14] = len(x.Finalizers) != 0 + yyq2[15] = x.ClusterName != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(16) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("generateName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selfLink")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.Generation)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("generation")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.Generation)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yy25 := &x.CreationTimestamp + yym26 := z.EncBinary() + _ = yym26 + if false { + } else if z.HasExtensions() && z.EncExt(yy25) { + } else if yym26 { + z.EncBinaryMarshal(yy25) + } else if !yym26 && z.IsJSONHandle() { + z.EncJSONMarshal(yy25) + } else { + z.EncFallback(yy25) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy27 := &x.CreationTimestamp + yym28 := z.EncBinary() + _ = yym28 + if false { + } else if z.HasExtensions() && z.EncExt(yy27) { + } else if yym28 { + z.EncBinaryMarshal(yy27) + } else if !yym28 && z.IsJSONHandle() { + z.EncJSONMarshal(yy27) + } else { + z.EncFallback(yy27) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.DeletionTimestamp == nil { + r.EncodeNil() + } else { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { 
+ } else if yym30 { + z.EncBinaryMarshal(x.DeletionTimestamp) + } else if !yym30 && z.IsJSONHandle() { + z.EncJSONMarshal(x.DeletionTimestamp) + } else { + z.EncFallback(x.DeletionTimestamp) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DeletionTimestamp == nil { + r.EncodeNil() + } else { + yym31 := z.EncBinary() + _ = yym31 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { + } else if yym31 { + z.EncBinaryMarshal(x.DeletionTimestamp) + } else if !yym31 && z.IsJSONHandle() { + z.EncJSONMarshal(x.DeletionTimestamp) + } else { + z.EncFallback(x.DeletionTimestamp) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.DeletionGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy33 := *x.DeletionGracePeriodSeconds + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + r.EncodeInt(int64(yy33)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DeletionGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy35 := *x.DeletionGracePeriodSeconds + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + r.EncodeInt(int64(yy35)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.Labels == nil { + r.EncodeNil() + } else { + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + z.F.EncMapStringStringV(x.Labels, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labels")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Labels == nil { + r.EncodeNil() + } else { + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + z.F.EncMapStringStringV(x.Labels, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.Annotations == nil { + r.EncodeNil() + } else { + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + z.F.EncMapStringStringV(x.Annotations, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("annotations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Annotations == nil { + r.EncodeNil() + } else { + yym42 := z.EncBinary() + _ = yym42 + if false { + } else { + z.F.EncMapStringStringV(x.Annotations, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.OwnerReferences == nil { + r.EncodeNil() + } else { + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + h.encSlicev1_OwnerReference(([]pkg2_v1.OwnerReference)(x.OwnerReferences), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.OwnerReferences 
== nil { + r.EncodeNil() + } else { + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + h.encSlicev1_OwnerReference(([]pkg2_v1.OwnerReference)(x.OwnerReferences), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.Initializers == nil { + r.EncodeNil() + } else { + yym47 := z.EncBinary() + _ = yym47 + if false { + } else if z.HasExtensions() && z.EncExt(x.Initializers) { + } else { + z.EncFallback(x.Initializers) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initializers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Initializers == nil { + r.EncodeNil() + } else { + yym48 := z.EncBinary() + _ = yym48 + if false { + } else if z.HasExtensions() && z.EncExt(x.Initializers) { + } else { + z.EncFallback(x.Initializers) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym50 := z.EncBinary() + _ = yym50 + if false { + } else { + z.F.EncSliceStringV(x.Finalizers, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finalizers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym51 := z.EncBinary() + _ = yym51 + if false { + } else { + z.F.EncSliceStringV(x.Finalizers, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + yym53 := z.EncBinary() + _ = yym53 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("clusterName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym54 := z.EncBinary() + _ = yym54 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMeta) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + 
if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "generateName": + if r.TryDecodeAsNil() { + x.GenerateName = "" + } else { + yyv6 := &x.GenerateName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv8 := &x.Namespace + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "selfLink": + if r.TryDecodeAsNil() { + x.SelfLink = "" + } else { + yyv10 := &x.SelfLink + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv12 := &x.UID + yym13 := z.DecBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.DecExt(yyv12) { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "resourceVersion": + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "generation": + if r.TryDecodeAsNil() { + x.Generation = 0 + } else { + yyv16 := &x.Generation + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int64)(yyv16)) = int64(r.DecodeInt(64)) + } + } + case "creationTimestamp": + if r.TryDecodeAsNil() { + x.CreationTimestamp = pkg2_v1.Time{} + } else { + yyv18 := &x.CreationTimestamp + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else if yym19 { + z.DecBinaryUnmarshal(yyv18) + } else if !yym19 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv18) + } else { + z.DecFallback(yyv18, false) + } + } + case "deletionTimestamp": + if r.TryDecodeAsNil() { + if x.DeletionTimestamp != nil { + x.DeletionTimestamp = nil + } + } else { + if x.DeletionTimestamp == nil { + x.DeletionTimestamp = new(pkg2_v1.Time) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { + } else if yym21 { + z.DecBinaryUnmarshal(x.DeletionTimestamp) + } else if !yym21 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.DeletionTimestamp) + } else { + z.DecFallback(x.DeletionTimestamp, false) + } + } + case "deletionGracePeriodSeconds": + if r.TryDecodeAsNil() { + if x.DeletionGracePeriodSeconds != nil { + x.DeletionGracePeriodSeconds = nil + } + } else { + if x.DeletionGracePeriodSeconds == nil { + x.DeletionGracePeriodSeconds = new(int64) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + case "labels": + if r.TryDecodeAsNil() { + x.Labels = nil + } else { + yyv24 := &x.Labels + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + z.F.DecMapStringStringX(yyv24, false, d) + } + } + case "annotations": + if r.TryDecodeAsNil() { + x.Annotations = nil + } else { + yyv26 := &x.Annotations + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + z.F.DecMapStringStringX(yyv26, false, d) + } + } + case 
"ownerReferences": + if r.TryDecodeAsNil() { + x.OwnerReferences = nil + } else { + yyv28 := &x.OwnerReferences + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + h.decSlicev1_OwnerReference((*[]pkg2_v1.OwnerReference)(yyv28), d) + } + } + case "initializers": + if r.TryDecodeAsNil() { + if x.Initializers != nil { + x.Initializers = nil + } + } else { + if x.Initializers == nil { + x.Initializers = new(pkg2_v1.Initializers) + } + yym31 := z.DecBinary() + _ = yym31 + if false { + } else if z.HasExtensions() && z.DecExt(x.Initializers) { + } else { + z.DecFallback(x.Initializers, false) + } + } + case "finalizers": + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv32 := &x.Finalizers + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + z.F.DecSliceStringX(yyv32, false, d) + } + } + case "clusterName": + if r.TryDecodeAsNil() { + x.ClusterName = "" + } else { + yyv34 := &x.ClusterName + yym35 := z.DecBinary() + _ = yym35 + if false { + } else { + *((*string)(yyv34)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj36 int + var yyb36 bool + var yyhl36 bool = l >= 0 + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv37 := &x.Name + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.GenerateName = "" + } else { + yyv39 := &x.GenerateName + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv41 := &x.Namespace + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SelfLink = "" + } else { + yyv43 := &x.SelfLink + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv45 := &x.UID + yym46 := z.DecBinary() + _ = yym46 + if false { + } else if z.HasExtensions() && z.DecExt(yyv45) { + } else { + *((*string)(yyv45)) = 
r.DecodeString() + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv47 := &x.ResourceVersion + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + *((*string)(yyv47)) = r.DecodeString() + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Generation = 0 + } else { + yyv49 := &x.Generation + yym50 := z.DecBinary() + _ = yym50 + if false { + } else { + *((*int64)(yyv49)) = int64(r.DecodeInt(64)) + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CreationTimestamp = pkg2_v1.Time{} + } else { + yyv51 := &x.CreationTimestamp + yym52 := z.DecBinary() + _ = yym52 + if false { + } else if z.HasExtensions() && z.DecExt(yyv51) { + } else if yym52 { + z.DecBinaryUnmarshal(yyv51) + } else if !yym52 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv51) + } else { + z.DecFallback(yyv51, false) + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DeletionTimestamp != nil { + x.DeletionTimestamp = nil + } + } else { + if x.DeletionTimestamp == nil { + x.DeletionTimestamp = new(pkg2_v1.Time) + } + yym54 := z.DecBinary() + _ = yym54 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { + } else if yym54 { + z.DecBinaryUnmarshal(x.DeletionTimestamp) + } else if !yym54 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.DeletionTimestamp) + } else { + z.DecFallback(x.DeletionTimestamp, false) + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DeletionGracePeriodSeconds != nil { + x.DeletionGracePeriodSeconds = nil + } + } else { + if x.DeletionGracePeriodSeconds == nil { + x.DeletionGracePeriodSeconds = new(int64) + } + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Labels = nil + } else { + yyv57 := &x.Labels + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + z.F.DecMapStringStringX(yyv57, false, d) + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Annotations = nil + } else { + yyv59 := 
&x.Annotations + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + z.F.DecMapStringStringX(yyv59, false, d) + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OwnerReferences = nil + } else { + yyv61 := &x.OwnerReferences + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + h.decSlicev1_OwnerReference((*[]pkg2_v1.OwnerReference)(yyv61), d) + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Initializers != nil { + x.Initializers = nil + } + } else { + if x.Initializers == nil { + x.Initializers = new(pkg2_v1.Initializers) + } + yym64 := z.DecBinary() + _ = yym64 + if false { + } else if z.HasExtensions() && z.DecExt(x.Initializers) { + } else { + z.DecFallback(x.Initializers, false) + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv65 := &x.Finalizers + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + z.F.DecSliceStringX(yyv65, false, d) + } + } + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClusterName = "" + } else { + yyv67 := &x.ClusterName + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + *((*string)(yyv67)) = r.DecodeString() + } + } + for { + yyj36++ + if yyhl36 { + yyb36 = yyj36 > l + } else { + yyb36 = r.CheckBreak() + } + if yyb36 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj36-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [28]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil + yyq2[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil + yyq2[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil + yyq2[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil + yyq2[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil + yyq2[6] = x.VolumeSource.Secret != nil && x.Secret != nil + yyq2[7] = x.VolumeSource.NFS != nil && x.NFS != nil + yyq2[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil + yyq2[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil + yyq2[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil + yyq2[11] = x.VolumeSource.RBD != nil && x.RBD != nil + yyq2[12] = x.VolumeSource.FlexVolume != nil && x.FlexVolume 
!= nil + yyq2[13] = x.VolumeSource.Cinder != nil && x.Cinder != nil + yyq2[14] = x.VolumeSource.CephFS != nil && x.CephFS != nil + yyq2[15] = x.VolumeSource.Flocker != nil && x.Flocker != nil + yyq2[16] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil + yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil + yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil + yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil + yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil + yyq2[21] = x.VolumeSource.Quobyte != nil && x.Quobyte != nil + yyq2[22] = x.VolumeSource.AzureDisk != nil && x.AzureDisk != nil + yyq2[23] = x.VolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil + yyq2[24] = x.VolumeSource.Projected != nil && x.Projected != nil + yyq2[25] = x.VolumeSource.PortworxVolume != nil && x.PortworxVolume != nil + yyq2[26] = x.VolumeSource.ScaleIO != nil && x.ScaleIO != nil + yyq2[27] = x.VolumeSource.StorageOS != nil && x.StorageOS != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(28) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + var yyn6 bool + if x.VolumeSource.HostPath == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + } + var yyn9 bool + if x.VolumeSource.EmptyDir == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.EmptyDir == nil { + r.EncodeNil() + } else { + x.EmptyDir.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn9 { + r.EncodeNil() + } else { + if x.EmptyDir == nil { + r.EncodeNil() + } else { + x.EmptyDir.CodecEncodeSelf(e) + } + } + } + } + var yyn12 bool + if x.VolumeSource.GCEPersistentDisk == nil { + yyn12 = true + goto LABEL12 + } + LABEL12: + if yyr2 || yy2arr2 { + if yyn12 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[3] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn12 { + r.EncodeNil() + } else { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn15 bool + if x.VolumeSource.AWSElasticBlockStore == nil { + yyn15 = true + goto LABEL15 + } + LABEL15: + if yyr2 || yy2arr2 { + if yyn15 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn15 { + r.EncodeNil() + } else { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + } + var yyn18 bool + if x.VolumeSource.GitRepo == nil { + yyn18 = true + goto LABEL18 + } + LABEL18: + if yyr2 || yy2arr2 { + if yyn18 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.GitRepo == nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn18 { + r.EncodeNil() + } else { + if x.GitRepo == nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } + } + } + var yyn21 bool + if x.VolumeSource.Secret == nil { + yyn21 = true + goto LABEL21 + } + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secret")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn21 { + r.EncodeNil() + } else { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } + } + } + var yyn24 bool + if x.VolumeSource.NFS == nil { + yyn24 = true + goto LABEL24 + } + LABEL24: + if yyr2 || yy2arr2 { + if yyn24 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn24 { + r.EncodeNil() + } else { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + } + var yyn27 bool + if x.VolumeSource.ISCSI == nil { + yyn27 = true + goto LABEL27 + } + LABEL27: + if yyr2 || yy2arr2 { + if yyn27 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } 
+ } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn27 { + r.EncodeNil() + } else { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + } + var yyn30 bool + if x.VolumeSource.Glusterfs == nil { + yyn30 = true + goto LABEL30 + } + LABEL30: + if yyr2 || yy2arr2 { + if yyn30 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn30 { + r.EncodeNil() + } else { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + } + var yyn33 bool + if x.VolumeSource.PersistentVolumeClaim == nil { + yyn33 = true + goto LABEL33 + } + LABEL33: + if yyr2 || yy2arr2 { + if yyn33 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.PersistentVolumeClaim == nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn33 { + r.EncodeNil() + } else { + if x.PersistentVolumeClaim == nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } + } + } + var yyn36 bool + if x.VolumeSource.RBD == nil { + yyn36 = true + goto LABEL36 + } + LABEL36: + if yyr2 || yy2arr2 { + if yyn36 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn36 { + r.EncodeNil() + } else { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + } + var yyn39 bool + if x.VolumeSource.FlexVolume == nil { + yyn39 = true + goto LABEL39 + } + LABEL39: + if yyr2 || yy2arr2 { + if yyn39 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn39 { + r.EncodeNil() + } else { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn42 bool + if x.VolumeSource.Cinder == nil { + yyn42 = true + goto LABEL42 + } + LABEL42: + if yyr2 || yy2arr2 { + if yyn42 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.Cinder == nil { + r.EncodeNil() + } else { + 
x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn42 { + r.EncodeNil() + } else { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + } + var yyn45 bool + if x.VolumeSource.CephFS == nil { + yyn45 = true + goto LABEL45 + } + LABEL45: + if yyr2 || yy2arr2 { + if yyn45 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn45 { + r.EncodeNil() + } else { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + } + var yyn48 bool + if x.VolumeSource.Flocker == nil { + yyn48 = true + goto LABEL48 + } + LABEL48: + if yyr2 || yy2arr2 { + if yyn48 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn48 { + r.EncodeNil() + } else { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } + } + } + var yyn51 bool + if x.VolumeSource.DownwardAPI == nil { + yyn51 = true + goto LABEL51 + } + LABEL51: + if yyr2 || yy2arr2 { + if yyn51 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn51 { + r.EncodeNil() + } else { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } + } + } + var yyn54 bool + if x.VolumeSource.FC == nil { + yyn54 = true + goto LABEL54 + } + LABEL54: + if yyr2 || yy2arr2 { + if yyn54 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn54 { + r.EncodeNil() + } else { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + } + var yyn57 bool + if x.VolumeSource.AzureFile == nil { + yyn57 = true + goto LABEL57 + } + LABEL57: + if yyr2 || yy2arr2 { + if yyn57 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } 
else { + r.EncodeNil() + } + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn57 { + r.EncodeNil() + } else { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + } + var yyn60 bool + if x.VolumeSource.ConfigMap == nil { + yyn60 = true + goto LABEL60 + } + LABEL60: + if yyr2 || yy2arr2 { + if yyn60 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMap")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn60 { + r.EncodeNil() + } else { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } + } + } + var yyn63 bool + if x.VolumeSource.VsphereVolume == nil { + yyn63 = true + goto LABEL63 + } + LABEL63: + if yyr2 || yy2arr2 { + if yyn63 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn63 { + r.EncodeNil() + } else { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn66 bool + if x.VolumeSource.Quobyte == nil { + yyn66 = true + goto LABEL66 + } + LABEL66: + if yyr2 || yy2arr2 { + if yyn66 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn66 { + r.EncodeNil() + } else { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + } + var yyn69 bool + if x.VolumeSource.AzureDisk == nil { + yyn69 = true + goto LABEL69 + } + LABEL69: + if yyr2 || yy2arr2 { + if yyn69 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[22] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[22] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn69 { + r.EncodeNil() + } else { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn72 bool + if x.VolumeSource.PhotonPersistentDisk == nil { + yyn72 = true + goto LABEL72 + } + LABEL72: + if yyr2 || yy2arr2 { + if yyn72 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + if x.PhotonPersistentDisk == nil { + 
r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn72 { + r.EncodeNil() + } else { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn75 bool + if x.VolumeSource.Projected == nil { + yyn75 = true + goto LABEL75 + } + LABEL75: + if yyr2 || yy2arr2 { + if yyn75 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[24] { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[24] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("projected")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn75 { + r.EncodeNil() + } else { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } + } + } + var yyn78 bool + if x.VolumeSource.PortworxVolume == nil { + yyn78 = true + goto LABEL78 + } + LABEL78: + if yyr2 || yy2arr2 { + if yyn78 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[25] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[25] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn78 { + r.EncodeNil() + } else { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn81 bool + if x.VolumeSource.ScaleIO == nil { + yyn81 = true + goto LABEL81 + } + LABEL81: + if yyr2 || yy2arr2 { + if yyn81 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[26] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[26] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn81 { + r.EncodeNil() + } else { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + } + var yyn84 bool + if x.VolumeSource.StorageOS == nil { + yyn84 = true + goto LABEL84 + } + LABEL84: + if yyr2 || yy2arr2 { + if yyn84 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[27] { + if x.StorageOS == nil { + r.EncodeNil() + } else { + x.StorageOS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[27] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageos")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn84 { + r.EncodeNil() + } else { + if x.StorageOS == nil { + r.EncodeNil() + } else { + x.StorageOS.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func 
(x *Volume) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostPath": + if x.VolumeSource.HostPath == nil { + x.VolumeSource.HostPath = new(HostPathVolumeSource) + } + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "emptyDir": + if x.VolumeSource.EmptyDir == nil { + x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) + } + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + case "gcePersistentDisk": + if x.VolumeSource.GCEPersistentDisk == nil { + x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if x.VolumeSource.AWSElasticBlockStore == nil { + x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "gitRepo": + if x.VolumeSource.GitRepo == nil { + x.VolumeSource.GitRepo = new(GitRepoVolumeSource) + } + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + case "secret": + if x.VolumeSource.Secret == nil { + x.VolumeSource.Secret = new(SecretVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + 
x.Secret.CodecDecodeSelf(d) + } + case "nfs": + if x.VolumeSource.NFS == nil { + x.VolumeSource.NFS = new(NFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "iscsi": + if x.VolumeSource.ISCSI == nil { + x.VolumeSource.ISCSI = new(ISCSIVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "glusterfs": + if x.VolumeSource.Glusterfs == nil { + x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "persistentVolumeClaim": + if x.VolumeSource.PersistentVolumeClaim == nil { + x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + case "rbd": + if x.VolumeSource.RBD == nil { + x.VolumeSource.RBD = new(RBDVolumeSource) + } + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "flexVolume": + if x.VolumeSource.FlexVolume == nil { + x.VolumeSource.FlexVolume = new(FlexVolumeSource) + } + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "cinder": + if x.VolumeSource.Cinder == nil { + x.VolumeSource.Cinder = new(CinderVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if x.VolumeSource.CephFS == nil { + x.VolumeSource.CephFS = new(CephFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case "flocker": + if x.VolumeSource.Flocker == nil { + x.VolumeSource.Flocker = new(FlockerVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "downwardAPI": + if x.VolumeSource.DownwardAPI == nil { + x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) + } + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + case "fc": + if x.VolumeSource.FC == nil { + x.VolumeSource.FC = new(FCVolumeSource) + } + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "azureFile": + if x.VolumeSource.AzureFile == nil { + x.VolumeSource.AzureFile = new(AzureFileVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if 
x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "configMap": + if x.VolumeSource.ConfigMap == nil { + x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + case "vsphereVolume": + if x.VolumeSource.VsphereVolume == nil { + x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if x.VolumeSource.Quobyte == nil { + x.VolumeSource.Quobyte = new(QuobyteVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if x.VolumeSource.AzureDisk == nil { + x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if x.VolumeSource.PhotonPersistentDisk == nil { + x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "projected": + if x.VolumeSource.Projected == nil { + x.VolumeSource.Projected = new(ProjectedVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + case "portworxVolume": + if x.VolumeSource.PortworxVolume == nil { + x.VolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if x.VolumeSource.ScaleIO == nil { + x.VolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + case "storageos": + if x.VolumeSource.StorageOS == nil { + x.VolumeSource.StorageOS = new(StorageOSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.StorageOS != nil { + x.StorageOS = nil + } + } else { + if x.StorageOS == nil { + x.StorageOS = new(StorageOSVolumeSource) + } + x.StorageOS.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj33 int + var yyb33 bool + var yyhl33 bool = l >= 0 + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = 
r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv34 := &x.Name + yym35 := z.DecBinary() + _ = yym35 + if false { + } else { + *((*string)(yyv34)) = r.DecodeString() + } + } + if x.VolumeSource.HostPath == nil { + x.VolumeSource.HostPath = new(HostPathVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + if x.VolumeSource.EmptyDir == nil { + x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + if x.VolumeSource.GCEPersistentDisk == nil { + x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + if x.VolumeSource.AWSElasticBlockStore == nil { + x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + if x.VolumeSource.GitRepo == nil { + x.VolumeSource.GitRepo = new(GitRepoVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + if x.VolumeSource.Secret == nil { + x.VolumeSource.Secret = new(SecretVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { 
+ x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + if x.VolumeSource.NFS == nil { + x.VolumeSource.NFS = new(NFSVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + if x.VolumeSource.ISCSI == nil { + x.VolumeSource.ISCSI = new(ISCSIVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + if x.VolumeSource.Glusterfs == nil { + x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + if x.VolumeSource.PersistentVolumeClaim == nil { + x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + if x.VolumeSource.RBD == nil { + x.VolumeSource.RBD = new(RBDVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + if x.VolumeSource.FlexVolume == nil { + x.VolumeSource.FlexVolume = new(FlexVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + if x.VolumeSource.Cinder == nil { + x.VolumeSource.Cinder = new(CinderVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + if x.VolumeSource.CephFS == nil { + x.VolumeSource.CephFS = new(CephFSVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + if x.VolumeSource.Flocker == nil { + x.VolumeSource.Flocker = new(FlockerVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + if x.VolumeSource.DownwardAPI == nil { + x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + if x.VolumeSource.FC == nil { + x.VolumeSource.FC = new(FCVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + if x.VolumeSource.AzureFile == nil { + x.VolumeSource.AzureFile = new(AzureFileVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + if x.VolumeSource.ConfigMap == nil { + x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + if x.VolumeSource.VsphereVolume == nil { + x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + if x.VolumeSource.Quobyte == nil { + x.VolumeSource.Quobyte = new(QuobyteVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + if x.VolumeSource.AzureDisk == nil { + x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + if x.VolumeSource.PhotonPersistentDisk == nil { + x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + if x.VolumeSource.Projected == nil { + x.VolumeSource.Projected = new(ProjectedVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + if x.VolumeSource.PortworxVolume == nil { + x.VolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + if x.VolumeSource.ScaleIO == nil { + x.VolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + 
} + if x.VolumeSource.StorageOS == nil { + x.VolumeSource.StorageOS = new(StorageOSVolumeSource) + } + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StorageOS != nil { + x.StorageOS = nil + } + } else { + if x.StorageOS == nil { + x.StorageOS = new(StorageOSVolumeSource) + } + x.StorageOS.CodecDecodeSelf(d) + } + for { + yyj33++ + if yyhl33 { + yyb33 = yyj33 > l + } else { + yyb33 = r.CheckBreak() + } + if yyb33 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj33-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [27]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.HostPath != nil + yyq2[1] = x.EmptyDir != nil + yyq2[2] = x.GCEPersistentDisk != nil + yyq2[3] = x.AWSElasticBlockStore != nil + yyq2[4] = x.GitRepo != nil + yyq2[5] = x.Secret != nil + yyq2[6] = x.NFS != nil + yyq2[7] = x.ISCSI != nil + yyq2[8] = x.Glusterfs != nil + yyq2[9] = x.PersistentVolumeClaim != nil + yyq2[10] = x.RBD != nil + yyq2[11] = x.FlexVolume != nil + yyq2[12] = x.Cinder != nil + yyq2[13] = x.CephFS != nil + yyq2[14] = x.Flocker != nil + yyq2[15] = x.DownwardAPI != nil + yyq2[16] = x.FC != nil + yyq2[17] = x.AzureFile != nil + yyq2[18] = x.ConfigMap != nil + yyq2[19] = x.VsphereVolume != nil + yyq2[20] = x.Quobyte != nil + yyq2[21] = x.AzureDisk != nil + yyq2[22] = x.PhotonPersistentDisk != nil + yyq2[23] = x.Projected != nil + yyq2[24] = x.PortworxVolume != nil + yyq2[25] = x.ScaleIO != nil + yyq2[26] = x.StorageOS != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(27) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.EmptyDir == nil { + r.EncodeNil() + } else { + x.EmptyDir.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.EmptyDir == nil { + r.EncodeNil() + } else { + x.EmptyDir.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + 
x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.GitRepo == nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GitRepo == nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secret")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.PersistentVolumeClaim == nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PersistentVolumeClaim == nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMap")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[22] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } 
else { + if yyq2[22] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("projected")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[24] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[24] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[25] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[25] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[26] { + if x.StorageOS == nil { + r.EncodeNil() + } else { + x.StorageOS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[26] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageos")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StorageOS == nil { + r.EncodeNil() + } else { + x.StorageOS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, 
_, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hostPath": + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "emptyDir": + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + case "gcePersistentDisk": + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "gitRepo": + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + case "secret": + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + case "nfs": + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "iscsi": + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "glusterfs": + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "persistentVolumeClaim": + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + case "rbd": + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "flexVolume": + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "cinder": + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case "flocker": + if r.TryDecodeAsNil() { + if 
x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "downwardAPI": + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + case "fc": + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "azureFile": + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "configMap": + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + case "vsphereVolume": + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "projected": + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + case "portworxVolume": + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + case "storageos": + if r.TryDecodeAsNil() { + if x.StorageOS != nil { + x.StorageOS = nil + } + } else { + if x.StorageOS == nil { + x.StorageOS = new(StorageOSVolumeSource) + } + x.StorageOS.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj31 int + var yyb31 bool + var yyhl31 bool = l >= 0 + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil 
{ + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + yyj31++ + 
if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + 
} + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StorageOS != nil { + x.StorageOS = nil + } + } else { + if x.StorageOS == nil { + x.StorageOS = new(StorageOSVolumeSource) + } + x.StorageOS.CodecDecodeSelf(d) + } + for { + yyj31++ + if yyhl31 { + yyb31 = yyj31 > l + } else { + yyb31 = r.CheckBreak() + } + if yyb31 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj31-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("claimName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + 
} else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "claimName": + if r.TryDecodeAsNil() { + x.ClaimName = "" + } else { + yyv4 := &x.ClaimName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv6 := &x.ReadOnly + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClaimName = "" + } else { + yyv9 := &x.ClaimName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv11 := &x.ReadOnly + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [21]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.GCEPersistentDisk != nil + yyq2[1] = x.AWSElasticBlockStore != nil + yyq2[2] = x.HostPath != nil + yyq2[3] = x.Glusterfs != nil + yyq2[4] = x.NFS != nil + yyq2[5] = x.RBD != nil + yyq2[6] = x.ISCSI != nil + yyq2[7] = x.Cinder != nil + yyq2[8] = x.CephFS != nil + yyq2[9] = x.FC != nil + yyq2[10] = x.Flocker != nil + yyq2[11] = x.FlexVolume != nil + yyq2[12] = x.AzureFile != nil + yyq2[13] = x.VsphereVolume != 
nil + yyq2[14] = x.Quobyte != nil + yyq2[15] = x.AzureDisk != nil + yyq2[16] = x.PhotonPersistentDisk != nil + yyq2[17] = x.PortworxVolume != nil + yyq2[18] = x.ScaleIO != nil + yyq2[19] = x.Local != nil + yyq2[20] = x.StorageOS != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(21) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RBD == nil { + r.EncodeNil() 
+ } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if 
x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.Local == nil { + r.EncodeNil() + } else { + x.Local.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } 
+ } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("local")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Local == nil { + r.EncodeNil() + } else { + x.Local.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.StorageOS == nil { + r.EncodeNil() + } else { + x.StorageOS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageos")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StorageOS == nil { + r.EncodeNil() + } else { + x.StorageOS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "gcePersistentDisk": + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "hostPath": + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "glusterfs": + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "nfs": + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } 
+ case "rbd": + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "iscsi": + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "cinder": + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case "fc": + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "flocker": + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "flexVolume": + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "azureFile": + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "vsphereVolume": + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "portworxVolume": + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + case "local": + if r.TryDecodeAsNil() { + if x.Local != nil { + x.Local = nil + } + } else { + if x.Local == nil { + x.Local = new(LocalVolumeSource) + } + x.Local.CodecDecodeSelf(d) + } + case "storageos": + if r.TryDecodeAsNil() { + if x.StorageOS != nil { + x.StorageOS = nil + } + } else { + if x.StorageOS == nil { + x.StorageOS = new(StorageOSPersistentVolumeSource) + } + x.StorageOS.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x 
*PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj25 int + var yyb25 bool + var yyhl25 bool = l >= 0 + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } 
else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = 
r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Local != nil { + x.Local = nil + } + } else { + if x.Local == nil { + x.Local = new(LocalVolumeSource) + } + x.Local.CodecDecodeSelf(d) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StorageOS != nil { + x.StorageOS = nil + } + } else { + if x.StorageOS == nil { + x.StorageOS = new(StorageOSPersistentVolumeSource) + } + x.StorageOS.CodecDecodeSelf(d) + } + for { + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj25-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolume) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // 
default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [26]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Capacity) != 0 + yyq2[1] = x.PersistentVolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil + yyq2[2] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil + yyq2[3] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil + yyq2[4] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil + yyq2[5] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil + yyq2[6] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil + yyq2[7] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil + yyq2[8] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil + yyq2[9] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil + yyq2[10] = x.PersistentVolumeSource.FC != nil && x.FC != nil + yyq2[11] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil + yyq2[12] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil + yyq2[13] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil + yyq2[14] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil + yyq2[15] = x.PersistentVolumeSource.Quobyte != nil && x.Quobyte != nil + yyq2[16] = x.PersistentVolumeSource.AzureDisk != nil && x.AzureDisk != nil + yyq2[17] = x.PersistentVolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil + yyq2[18] = x.PersistentVolumeSource.PortworxVolume != nil && x.PortworxVolume != nil + yyq2[19] = x.PersistentVolumeSource.ScaleIO != nil && x.ScaleIO != nil + yyq2[20] = x.PersistentVolumeSource.Local != nil && x.Local != nil + yyq2[21] = x.PersistentVolumeSource.StorageOS != nil && x.StorageOS != nil + yyq2[22] = len(x.AccessModes) != 0 + yyq2[23] = x.ClaimRef != nil + yyq2[24] = x.PersistentVolumeReclaimPolicy != "" + yyq2[25] = x.StorageClassName != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(26) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } + } + var yyn6 bool + if 
x.PersistentVolumeSource.GCEPersistentDisk == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn9 bool + if x.PersistentVolumeSource.AWSElasticBlockStore == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn9 { + r.EncodeNil() + } else { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + } + var yyn12 bool + if x.PersistentVolumeSource.HostPath == nil { + yyn12 = true + goto LABEL12 + } + LABEL12: + if yyr2 || yy2arr2 { + if yyn12 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn12 { + r.EncodeNil() + } else { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + } + var yyn15 bool + if x.PersistentVolumeSource.Glusterfs == nil { + yyn15 = true + goto LABEL15 + } + LABEL15: + if yyr2 || yy2arr2 { + if yyn15 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn15 { + r.EncodeNil() + } else { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + } + var yyn18 bool + if x.PersistentVolumeSource.NFS == nil { + yyn18 = true + goto LABEL18 + } + LABEL18: + if yyr2 || yy2arr2 { + if yyn18 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn18 { + 
r.EncodeNil() + } else { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + } + var yyn21 bool + if x.PersistentVolumeSource.RBD == nil { + yyn21 = true + goto LABEL21 + } + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn21 { + r.EncodeNil() + } else { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + } + var yyn24 bool + if x.PersistentVolumeSource.ISCSI == nil { + yyn24 = true + goto LABEL24 + } + LABEL24: + if yyr2 || yy2arr2 { + if yyn24 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn24 { + r.EncodeNil() + } else { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + } + var yyn27 bool + if x.PersistentVolumeSource.Cinder == nil { + yyn27 = true + goto LABEL27 + } + LABEL27: + if yyr2 || yy2arr2 { + if yyn27 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn27 { + r.EncodeNil() + } else { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + } + var yyn30 bool + if x.PersistentVolumeSource.CephFS == nil { + yyn30 = true + goto LABEL30 + } + LABEL30: + if yyr2 || yy2arr2 { + if yyn30 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn30 { + r.EncodeNil() + } else { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + } + var yyn33 bool + if x.PersistentVolumeSource.FC == nil { + yyn33 = true + goto LABEL33 + } + LABEL33: + if yyr2 || yy2arr2 { + if yyn33 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn33 { + r.EncodeNil() + } else { + if x.FC == nil { + r.EncodeNil() + } 
else { + x.FC.CodecEncodeSelf(e) + } + } + } + } + var yyn36 bool + if x.PersistentVolumeSource.Flocker == nil { + yyn36 = true + goto LABEL36 + } + LABEL36: + if yyr2 || yy2arr2 { + if yyn36 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn36 { + r.EncodeNil() + } else { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } + } + } + var yyn39 bool + if x.PersistentVolumeSource.FlexVolume == nil { + yyn39 = true + goto LABEL39 + } + LABEL39: + if yyr2 || yy2arr2 { + if yyn39 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn39 { + r.EncodeNil() + } else { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn42 bool + if x.PersistentVolumeSource.AzureFile == nil { + yyn42 = true + goto LABEL42 + } + LABEL42: + if yyr2 || yy2arr2 { + if yyn42 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn42 { + r.EncodeNil() + } else { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + } + var yyn45 bool + if x.PersistentVolumeSource.VsphereVolume == nil { + yyn45 = true + goto LABEL45 + } + LABEL45: + if yyr2 || yy2arr2 { + if yyn45 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn45 { + r.EncodeNil() + } else { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn48 bool + if x.PersistentVolumeSource.Quobyte == nil { + yyn48 = true + goto LABEL48 + } + LABEL48: + if yyr2 || yy2arr2 { + if yyn48 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
if yyn48 { + r.EncodeNil() + } else { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + } + var yyn51 bool + if x.PersistentVolumeSource.AzureDisk == nil { + yyn51 = true + goto LABEL51 + } + LABEL51: + if yyr2 || yy2arr2 { + if yyn51 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn51 { + r.EncodeNil() + } else { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn54 bool + if x.PersistentVolumeSource.PhotonPersistentDisk == nil { + yyn54 = true + goto LABEL54 + } + LABEL54: + if yyr2 || yy2arr2 { + if yyn54 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn54 { + r.EncodeNil() + } else { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn57 bool + if x.PersistentVolumeSource.PortworxVolume == nil { + yyn57 = true + goto LABEL57 + } + LABEL57: + if yyr2 || yy2arr2 { + if yyn57 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn57 { + r.EncodeNil() + } else { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn60 bool + if x.PersistentVolumeSource.ScaleIO == nil { + yyn60 = true + goto LABEL60 + } + LABEL60: + if yyr2 || yy2arr2 { + if yyn60 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn60 { + r.EncodeNil() + } else { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + } + var yyn63 bool + if x.PersistentVolumeSource.Local == nil { + yyn63 = true + goto LABEL63 + } + LABEL63: + if yyr2 || yy2arr2 { + if yyn63 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.Local == nil { + r.EncodeNil() + } else { + x.Local.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[20] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("local")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn63 { + r.EncodeNil() + } else { + if x.Local == nil { + r.EncodeNil() + } else { + x.Local.CodecEncodeSelf(e) + } + } + } + } + var yyn66 bool + if x.PersistentVolumeSource.StorageOS == nil { + yyn66 = true + goto LABEL66 + } + LABEL66: + if yyr2 || yy2arr2 { + if yyn66 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.StorageOS == nil { + r.EncodeNil() + } else { + x.StorageOS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageos")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn66 { + r.EncodeNil() + } else { + if x.StorageOS == nil { + r.EncodeNil() + } else { + x.StorageOS.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[22] { + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym70 := z.EncBinary() + _ = yym70 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[22] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("accessModes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym71 := z.EncBinary() + _ = yym71 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + if x.ClaimRef == nil { + r.EncodeNil() + } else { + x.ClaimRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("claimRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ClaimRef == nil { + r.EncodeNil() + } else { + x.ClaimRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[24] { + x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[24] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeReclaimPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[25] { + yym79 := z.EncBinary() + _ = yym79 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageClassName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[25] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageClassName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym80 := z.EncBinary() + _ = yym80 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageClassName)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "capacity": + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv4 := &x.Capacity + yyv4.CodecDecodeSelf(d) + } + case "gcePersistentDisk": + if x.PersistentVolumeSource.GCEPersistentDisk == nil { + x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if x.PersistentVolumeSource.AWSElasticBlockStore == nil { + x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "hostPath": + if x.PersistentVolumeSource.HostPath == nil { + x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) + } + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "glusterfs": + if x.PersistentVolumeSource.Glusterfs == nil { + x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "nfs": + if x.PersistentVolumeSource.NFS == nil { + x.PersistentVolumeSource.NFS = new(NFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "rbd": + if x.PersistentVolumeSource.RBD == nil { + x.PersistentVolumeSource.RBD = 
new(RBDVolumeSource) + } + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "iscsi": + if x.PersistentVolumeSource.ISCSI == nil { + x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "cinder": + if x.PersistentVolumeSource.Cinder == nil { + x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if x.PersistentVolumeSource.CephFS == nil { + x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case "fc": + if x.PersistentVolumeSource.FC == nil { + x.PersistentVolumeSource.FC = new(FCVolumeSource) + } + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "flocker": + if x.PersistentVolumeSource.Flocker == nil { + x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "flexVolume": + if x.PersistentVolumeSource.FlexVolume == nil { + x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) + } + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "azureFile": + if x.PersistentVolumeSource.AzureFile == nil { + x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "vsphereVolume": + if x.PersistentVolumeSource.VsphereVolume == nil { + x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if x.PersistentVolumeSource.Quobyte == nil { + x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if x.PersistentVolumeSource.AzureDisk == nil { + x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if x.PersistentVolumeSource.PhotonPersistentDisk == nil { + x.PersistentVolumeSource.PhotonPersistentDisk = 
new(PhotonPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "portworxVolume": + if x.PersistentVolumeSource.PortworxVolume == nil { + x.PersistentVolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if x.PersistentVolumeSource.ScaleIO == nil { + x.PersistentVolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + case "local": + if x.PersistentVolumeSource.Local == nil { + x.PersistentVolumeSource.Local = new(LocalVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Local != nil { + x.Local = nil + } + } else { + if x.Local == nil { + x.Local = new(LocalVolumeSource) + } + x.Local.CodecDecodeSelf(d) + } + case "storageos": + if x.PersistentVolumeSource.StorageOS == nil { + x.PersistentVolumeSource.StorageOS = new(StorageOSPersistentVolumeSource) + } + if r.TryDecodeAsNil() { + if x.StorageOS != nil { + x.StorageOS = nil + } + } else { + if x.StorageOS == nil { + x.StorageOS = new(StorageOSPersistentVolumeSource) + } + x.StorageOS.CodecDecodeSelf(d) + } + case "accessModes": + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv26 := &x.AccessModes + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv26), d) + } + } + case "claimRef": + if r.TryDecodeAsNil() { + if x.ClaimRef != nil { + x.ClaimRef = nil + } + } else { + if x.ClaimRef == nil { + x.ClaimRef = new(ObjectReference) + } + x.ClaimRef.CodecDecodeSelf(d) + } + case "persistentVolumeReclaimPolicy": + if r.TryDecodeAsNil() { + x.PersistentVolumeReclaimPolicy = "" + } else { + yyv29 := &x.PersistentVolumeReclaimPolicy + yyv29.CodecDecodeSelf(d) + } + case "storageClassName": + if r.TryDecodeAsNil() { + x.StorageClassName = "" + } else { + yyv30 := &x.StorageClassName + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj32 int + var yyb32 bool + var yyhl32 bool = l >= 0 + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv33 := &x.Capacity + yyv33.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.GCEPersistentDisk == nil { + x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.AWSElasticBlockStore == nil { + x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.HostPath == nil { + x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Glusterfs == nil { + x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.NFS == nil { + x.PersistentVolumeSource.NFS = new(NFSVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.RBD == nil { + x.PersistentVolumeSource.RBD = new(RBDVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.ISCSI == nil { + x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + 
x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Cinder == nil { + x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.CephFS == nil { + x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.FC == nil { + x.PersistentVolumeSource.FC = new(FCVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Flocker == nil { + x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.FlexVolume == nil { + x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.AzureFile == nil { + x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.VsphereVolume == nil { + x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Quobyte == nil { + x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.AzureDisk == nil { + x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.PhotonPersistentDisk == nil { + x.PersistentVolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.PortworxVolume == nil { + x.PersistentVolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.ScaleIO == nil { + x.PersistentVolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Local == nil { + x.PersistentVolumeSource.Local = new(LocalVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Local != nil { + x.Local = nil + } + } else { + if 
x.Local == nil { + x.Local = new(LocalVolumeSource) + } + x.Local.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.StorageOS == nil { + x.PersistentVolumeSource.StorageOS = new(StorageOSPersistentVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StorageOS != nil { + x.StorageOS = nil + } + } else { + if x.StorageOS == nil { + x.StorageOS = new(StorageOSPersistentVolumeSource) + } + x.StorageOS.CodecDecodeSelf(d) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv55 := &x.AccessModes + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv55), d) + } + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ClaimRef != nil { + x.ClaimRef = nil + } + } else { + if x.ClaimRef == nil { + x.ClaimRef = new(ObjectReference) + } + x.ClaimRef.CodecDecodeSelf(d) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PersistentVolumeReclaimPolicy = "" + } else { + yyv58 := &x.PersistentVolumeReclaimPolicy + yyv58.CodecDecodeSelf(d) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StorageClassName = "" + } else { + yyv59 := &x.StorageClassName + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + *((*string)(yyv59)) = r.DecodeString() + } + } + for { + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj32-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PersistentVolumeReclaimPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumeReclaimPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + 
} else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = x.Message != "" + yyq2[2] = x.Reason != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv5 := &x.Message + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv7 := &x.Reason + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv10 := &x.Phase + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv11 := &x.Message + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if 
r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePersistentVolume((*[]PersistentVolume)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePersistentVolume((*[]PersistentVolume)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func 
(x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaim) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeClaimSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeClaimStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeClaimSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeClaimStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() 
&& z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.AccessModes) != 0 + yyq2[1] = x.Selector != nil + yyq2[2] = true + yyq2[3] = x.VolumeName != "" + yyq2[4] = x.StorageClassName != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("accessModes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Resources + yy10.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Resources + yy12.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.StorageClassName == nil { + r.EncodeNil() + } else { + yy18 := *x.StorageClassName + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy18)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageClassName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StorageClassName == nil { + r.EncodeNil() + } else { + yy20 := *x.StorageClassName + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy20)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x 
*PersistentVolumeClaimSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "accessModes": + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv4 := &x.AccessModes + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv4), d) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv8 := &x.Resources + yyv8.CodecDecodeSelf(d) + } + case "volumeName": + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv9 := &x.VolumeName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "storageClassName": + if r.TryDecodeAsNil() { + if x.StorageClassName != nil { + x.StorageClassName = nil + } + } else { + if x.StorageClassName == nil { + x.StorageClassName = new(string) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(x.StorageClassName)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv14 := &x.AccessModes + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv14), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = 
yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_v1.LabelSelector) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv18 := &x.Resources + yyv18.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv19 := &x.VolumeName + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StorageClassName != nil { + x.StorageClassName = nil + } + } else { + if x.StorageClassName == nil { + x.StorageClassName = new(string) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(x.StorageClassName)) = r.DecodeString() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = len(x.AccessModes) != 0 + yyq2[2] = len(x.Capacity) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + 
h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("accessModes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + case "accessModes": + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv5 := &x.AccessModes + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d) + } + } + case "capacity": + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv7 := &x.Capacity + yyv7.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if 
yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv9 := &x.Phase + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv10 := &x.AccessModes + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv10), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv12 := &x.Capacity + yyv12.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PersistentVolumeAccessMode) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumeAccessMode) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x PersistentVolumePhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumePhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x PersistentVolumeClaimPhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumeClaimPhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false 
{ + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HostPathVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv7 := &x.Path + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + 
} + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EmptyDirVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Medium != "" + yyq2[1] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Medium.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("medium")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Medium.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy7 := &x.SizeLimit + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sizeLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.SizeLimit + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EmptyDirVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := 
string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "medium": + if r.TryDecodeAsNil() { + x.Medium = "" + } else { + yyv4 := &x.Medium + yyv4.CodecDecodeSelf(d) + } + case "sizeLimit": + if r.TryDecodeAsNil() { + x.SizeLimit = pkg3_resource.Quantity{} + } else { + yyv5 := &x.SizeLimit + yym6 := z.DecBinary() + _ = yym6 + if false { + } else if z.HasExtensions() && z.DecExt(yyv5) { + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv5) + } else { + z.DecFallback(yyv5, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Medium = "" + } else { + yyv8 := &x.Medium + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SizeLimit = pkg3_resource.Quantity{} + } else { + yyv9 := &x.SizeLimit + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("endpoints")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *GlusterfsVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "endpoints": + if r.TryDecodeAsNil() { + x.EndpointsName = "" + } else { + yyv4 := &x.EndpointsName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EndpointsName = "" + } else { + yyv11 := &x.EndpointsName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.FSType != "" + yyq2[3] = x.RBDPool != "" + yyq2[4] = x.RadosUser != "" + yyq2[5] = x.Keyring != "" + yyq2[6] = x.SecretRef != nil + yyq2[7] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.CephMonitors == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.CephMonitors, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("monitors")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CephMonitors == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.CephMonitors, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("image")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if 
false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pool")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("keyring")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RBDVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "monitors": + if r.TryDecodeAsNil() { + x.CephMonitors = nil + } else { + yyv4 := &x.CephMonitors + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "image": + if r.TryDecodeAsNil() { + x.RBDImage = "" + } else { + yyv6 := &x.RBDImage + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv8 := &x.FSType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "pool": + if r.TryDecodeAsNil() { + x.RBDPool = "" + } else { + yyv10 := &x.RBDPool + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "user": + if r.TryDecodeAsNil() { + x.RadosUser = "" + } else { + yyv12 := &x.RadosUser + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "keyring": + if r.TryDecodeAsNil() { + x.Keyring = "" + } else { + yyv14 := &x.Keyring + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv17 := &x.ReadOnly + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() 
+ } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CephMonitors = nil + } else { + yyv20 := &x.CephMonitors + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + z.F.DecSliceStringX(yyv20, false, d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RBDImage = "" + } else { + yyv22 := &x.RBDImage + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv24 := &x.FSType + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RBDPool = "" + } else { + yyv26 := &x.RBDPool + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RadosUser = "" + } else { + yyv28 := &x.RadosUser + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Keyring = "" + } else { + yyv30 := &x.Keyring + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv33 := &x.ReadOnly + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj19-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CinderVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CinderVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ 
= h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeID": + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv11 := &x.VolumeID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv13 := &x.FSType + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Path != "" + yyq2[2] = x.User != "" + yyq2[3] = x.SecretFile != "" + yyq2[4] = x.SecretRef != nil + 
yyq2[5] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Monitors == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Monitors, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("monitors")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Monitors == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Monitors, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CephFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "monitors": + if r.TryDecodeAsNil() { + x.Monitors = nil + } else { + yyv4 := &x.Monitors + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv8 := &x.User + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "secretFile": + if r.TryDecodeAsNil() { + x.SecretFile = "" + } else { + yyv10 := &x.SecretFile + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv13 := &x.ReadOnly + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Monitors = nil + } else { + yyv16 := &x.Monitors + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecSliceStringX(yyv16, false, d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv18 := &x.Path + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv20 := &x.User + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SecretFile = "" + } else { + yyv22 := &x.SecretFile + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv25 := &x.ReadOnly + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *FlockerVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.DatasetName != "" + yyq2[1] = x.DatasetUUID != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.DatasetName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("datasetName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetUUID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("datasetUUID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetUUID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *FlockerVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FlockerVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "datasetName": + if r.TryDecodeAsNil() { + x.DatasetName = "" + } else { + yyv4 := &x.DatasetName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "datasetUUID": + if r.TryDecodeAsNil() { + x.DatasetUUID = "" + } else { + yyv6 := &x.DatasetUUID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DatasetName = "" + } else { + yyv9 := &x.DatasetName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DatasetUUID = "" + } else { + yyv11 := &x.DatasetUUID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x StorageMedium) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *StorageMedium) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x Protocol) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *Protocol) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.Partition != 0 + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pdName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("partition")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *GCEPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "pdName": + if r.TryDecodeAsNil() { + x.PDName = "" + } else { + yyv4 := &x.PDName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := 
z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "partition": + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv8 := &x.Partition + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PDName = "" + } else { + yyv13 := &x.PDName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv15 := &x.FSType + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv17 := &x.Partition + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + yyq2[3] = x.User != "" + yyq2[4] = x.Group != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } 
+ } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("registry")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("group")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *QuobyteVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == 
codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *QuobyteVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "registry": + if r.TryDecodeAsNil() { + x.Registry = "" + } else { + yyv4 := &x.Registry + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "volume": + if r.TryDecodeAsNil() { + x.Volume = "" + } else { + yyv6 := &x.Volume + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv10 := &x.User + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "group": + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv12 := &x.Group + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Registry = "" + } else { + yyv15 := &x.Registry + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Volume = "" + } else { + yyv17 := &x.Volume + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = 
yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv21 := &x.User + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv23 := &x.Group + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.SecretRef != nil + yyq2[3] = x.ReadOnly != false + yyq2[4] = len(x.Options) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("driver")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Options == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncMapStringStringV(x.Options, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("options")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Options == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncMapStringStringV(x.Options, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *FlexVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "driver": + if r.TryDecodeAsNil() { + x.Driver = "" + } else { + yyv4 := &x.Driver + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv9 := &x.ReadOnly + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = 
r.DecodeBool() + } + } + case "options": + if r.TryDecodeAsNil() { + x.Options = nil + } else { + yyv11 := &x.Options + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + z.F.DecMapStringStringX(yyv11, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Driver = "" + } else { + yyv14 := &x.Driver + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv16 := &x.FSType + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Options = nil + } else { + yyv21 := &x.Options + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecMapStringStringX(yyv21, false, d) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.Partition != 0 + yyq2[3] = x.ReadOnly 
!= false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("partition")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AWSElasticBlockStoreVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, 
_, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeID": + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "partition": + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv8 := &x.Partition + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv13 := &x.VolumeID + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv15 := &x.FSType + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv17 := &x.Partition + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Revision != "" + yyq2[2] = x.Directory != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("repository")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revision")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("directory")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *GitRepoVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x 
*GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "repository": + if r.TryDecodeAsNil() { + x.Repository = "" + } else { + yyv4 := &x.Repository + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "revision": + if r.TryDecodeAsNil() { + x.Revision = "" + } else { + yyv6 := &x.Revision + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "directory": + if r.TryDecodeAsNil() { + x.Directory = "" + } else { + yyv8 := &x.Directory + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Repository = "" + } else { + yyv11 := &x.Repository + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Revision = "" + } else { + yyv13 := &x.Revision + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Directory = "" + } else { + yyv15 := &x.Directory + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool 
+ _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.SecretName != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.DefaultMode != nil + yyq2[3] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy10 := *x.DefaultMode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy12 := *x.DefaultMode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy15 := *x.Optional + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy17 := *x.Optional + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + 
yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "secretName": + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv4 := &x.SecretName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv13 := &x.SecretName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv15 := &x.Items + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv15), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + 
x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + 
yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv13), d) + } + } 
+ yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Server)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("server")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Server)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if 
yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "server": + if r.TryDecodeAsNil() { + x.Server = "" + } else { + yyv4 := &x.Server + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Server = "" + } else { + yyv11 := &x.Server + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x 
*ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[3] = x.ISCSIInterface != "" + yyq2[4] = x.FSType != "" + yyq2[5] = x.ReadOnly != false + yyq2[6] = len(x.Portals) != 0 + yyq2[7] = x.DiscoveryCHAPAuth != false + yyq2[8] = x.SessionCHAPAuth != false + yyq2[9] = x.SecretRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetPortal")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iqn")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Lun)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lun")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Lun)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsiInterface")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + 
} else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Portals == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + z.F.EncSliceStringV(x.Portals, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portals")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Portals == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + z.F.EncSliceStringV(x.Portals, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.DiscoveryCHAPAuth)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("chapAuthDiscovery")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(x.DiscoveryCHAPAuth)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.SessionCHAPAuth)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("chapAuthSession")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeBool(bool(x.SessionCHAPAuth)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ISCSIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "targetPortal": + if r.TryDecodeAsNil() { + x.TargetPortal = "" + } else { + yyv4 := &x.TargetPortal + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "iqn": + if r.TryDecodeAsNil() { + x.IQN = "" + } else { + yyv6 := &x.IQN + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "lun": + if r.TryDecodeAsNil() { + x.Lun = 0 + } else { + yyv8 := &x.Lun + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "iscsiInterface": + if r.TryDecodeAsNil() { + x.ISCSIInterface = "" + } else { + yyv10 := &x.ISCSIInterface + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv12 := &x.FSType + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv14 := &x.ReadOnly + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "portals": + if r.TryDecodeAsNil() { + x.Portals = nil + } else { + yyv16 := &x.Portals + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecSliceStringX(yyv16, false, d) + } + } + case "chapAuthDiscovery": + if r.TryDecodeAsNil() { + x.DiscoveryCHAPAuth = false + } else { + yyv18 := &x.DiscoveryCHAPAuth + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*bool)(yyv18)) = r.DecodeBool() + } + } + case "chapAuthSession": + if r.TryDecodeAsNil() { + x.SessionCHAPAuth = false + } else { + yyv20 := &x.SessionCHAPAuth + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*bool)(yyv20)) = r.DecodeBool() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + 
yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetPortal = "" + } else { + yyv24 := &x.TargetPortal + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IQN = "" + } else { + yyv26 := &x.IQN + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Lun = 0 + } else { + yyv28 := &x.Lun + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*int32)(yyv28)) = int32(r.DecodeInt(32)) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ISCSIInterface = "" + } else { + yyv30 := &x.ISCSIInterface + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv32 := &x.FSType + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv34 := &x.ReadOnly + yym35 := z.DecBinary() + _ = yym35 + if false { + } else { + *((*bool)(yyv34)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Portals = nil + } else { + yyv36 := &x.Portals + yym37 := z.DecBinary() + _ = yym37 + if false { + } else { + z.F.DecSliceStringX(yyv36, false, d) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DiscoveryCHAPAuth = false + } else { + yyv38 := &x.DiscoveryCHAPAuth + yym39 := z.DecBinary() + _ = yym39 + if false { + } else { + *((*bool)(yyv38)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SessionCHAPAuth = false + } else { + yyv40 := &x.SessionCHAPAuth + yym41 := z.DecBinary() + _ = yym41 + if false { + } else { + *((*bool)(yyv40)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj23-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.FSType != "" + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.TargetWWNs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.TargetWWNs, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetWWNs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetWWNs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.TargetWWNs, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Lun == nil { + r.EncodeNil() + } else { + yy7 := *x.Lun + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lun")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Lun == nil { + r.EncodeNil() + } else { + yy9 := *x.Lun + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *FCVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "targetWWNs": + if r.TryDecodeAsNil() { + x.TargetWWNs = nil + } else { + yyv4 := &x.TargetWWNs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "lun": + if r.TryDecodeAsNil() { + if x.Lun != nil { + x.Lun = nil + } + } else { + if x.Lun == nil { + x.Lun = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv8 := &x.FSType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.TargetWWNs = nil + } else { + yyv13 := &x.TargetWWNs + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Lun != nil { + x.Lun = nil + } + } else { + if x.Lun == nil { + x.Lun = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv17 := &x.FSType + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("shareName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AzureFileVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AzureFileVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "secretName": + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv4 := &x.SecretName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "shareName": + if r.TryDecodeAsNil() { + x.ShareName = "" + } else { + yyv6 := &x.ShareName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv11 := &x.SecretName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 
> l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ShareName = "" + } else { + yyv13 := &x.ShareName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.StoragePolicyName != "" + yyq2[3] = x.StoragePolicyID != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePolicyName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storagePolicyName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else 
{ + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePolicyName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePolicyID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storagePolicyID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePolicyID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumePath": + if r.TryDecodeAsNil() { + x.VolumePath = "" + } else { + yyv4 := &x.VolumePath + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "storagePolicyName": + if r.TryDecodeAsNil() { + x.StoragePolicyName = "" + } else { + yyv8 := &x.StoragePolicyName + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "storagePolicyID": + if r.TryDecodeAsNil() { + x.StoragePolicyID = "" + } else { + yyv10 := &x.StoragePolicyID + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 
bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumePath = "" + } else { + yyv13 := &x.VolumePath + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv15 := &x.FSType + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StoragePolicyName = "" + } else { + yyv17 := &x.StoragePolicyName + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StoragePolicyID = "" + } else { + yyv19 := &x.StoragePolicyID + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PhotonPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PdID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pdID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PdID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PhotonPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "pdID": + if r.TryDecodeAsNil() { + x.PdID = "" + } else { + yyv4 := &x.PdID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PdID = "" + } else { + yyv9 := &x.PdID + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv11 := &x.FSType + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 
= r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x AzureDataDiskCachingMode) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *AzureDataDiskCachingMode) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x AzureDataDiskKind) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *AzureDataDiskKind) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *AzureDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.CachingMode != nil + yyq2[3] = x.FSType != nil + yyq2[4] = x.ReadOnly != nil + yyq2[5] = x.Kind != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DiskName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("diskName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DiskName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DataDiskURI)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("diskURI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DataDiskURI)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.CachingMode == nil { + r.EncodeNil() + } else { + yy10 := *x.CachingMode + yy10.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] 
{ + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cachingMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CachingMode == nil { + r.EncodeNil() + } else { + yy12 := *x.CachingMode + yy12.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.FSType == nil { + r.EncodeNil() + } else { + yy15 := *x.FSType + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FSType == nil { + r.EncodeNil() + } else { + yy17 := *x.FSType + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ReadOnly == nil { + r.EncodeNil() + } else { + yy20 := *x.ReadOnly + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeBool(bool(yy20)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ReadOnly == nil { + r.EncodeNil() + } else { + yy22 := *x.ReadOnly + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(yy22)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Kind == nil { + r.EncodeNil() + } else { + yy25 := *x.Kind + yy25.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Kind == nil { + r.EncodeNil() + } else { + yy27 := *x.Kind + yy27.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AzureDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AzureDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } 
+ } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "diskName": + if r.TryDecodeAsNil() { + x.DiskName = "" + } else { + yyv4 := &x.DiskName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "diskURI": + if r.TryDecodeAsNil() { + x.DataDiskURI = "" + } else { + yyv6 := &x.DataDiskURI + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "cachingMode": + if r.TryDecodeAsNil() { + if x.CachingMode != nil { + x.CachingMode = nil + } + } else { + if x.CachingMode == nil { + x.CachingMode = new(AzureDataDiskCachingMode) + } + x.CachingMode.CodecDecodeSelf(d) + } + case "fsType": + if r.TryDecodeAsNil() { + if x.FSType != nil { + x.FSType = nil + } + } else { + if x.FSType == nil { + x.FSType = new(string) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(x.FSType)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + if x.ReadOnly != nil { + x.ReadOnly = nil + } + } else { + if x.ReadOnly == nil { + x.ReadOnly = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.ReadOnly)) = r.DecodeBool() + } + } + case "kind": + if r.TryDecodeAsNil() { + if x.Kind != nil { + x.Kind = nil + } + } else { + if x.Kind == nil { + x.Kind = new(AzureDataDiskKind) + } + x.Kind.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DiskName = "" + } else { + yyv15 := &x.DiskName + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DataDiskURI = "" + } else { + yyv17 := &x.DataDiskURI + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CachingMode != nil { + x.CachingMode = nil + } + } else { + if x.CachingMode == nil { + x.CachingMode = new(AzureDataDiskCachingMode) + } + x.CachingMode.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { 
+ if x.FSType != nil { + x.FSType = nil + } + } else { + if x.FSType == nil { + x.FSType = new(string) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(x.FSType)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ReadOnly != nil { + x.ReadOnly = nil + } + } else { + if x.ReadOnly == nil { + x.ReadOnly = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.ReadOnly)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Kind != nil { + x.Kind = nil + } + } else { + if x.Kind == nil { + x.Kind = new(AzureDataDiskKind) + } + x.Kind.CodecDecodeSelf(d) + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PortworxVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PortworxVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PortworxVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeID": + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PortworxVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv11 := &x.VolumeID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv13 := &x.FSType + yym14 := z.DecBinary() + _ = yym14 + if false { + 
} else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleIOVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[3] = x.SSLEnabled != false + yyq2[4] = x.ProtectionDomain != "" + yyq2[5] = x.StoragePool != "" + yyq2[6] = x.StorageMode != "" + yyq2[7] = x.VolumeName != "" + yyq2[8] = x.FSType != "" + yyq2[9] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Gateway)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gateway")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Gateway)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.System)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("system")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.System)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.SSLEnabled)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sslEnabled")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.SSLEnabled)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProtectionDomain)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protectionDomain")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProtectionDomain)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePool)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storagePool")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePool)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageMode)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageMode)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) 
+ } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleIOVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleIOVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "gateway": + if r.TryDecodeAsNil() { + x.Gateway = "" + } else { + yyv4 := &x.Gateway + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "system": + if r.TryDecodeAsNil() { + x.System = "" + } else { + yyv6 := &x.System + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "sslEnabled": + if r.TryDecodeAsNil() { + x.SSLEnabled = false + } else { + yyv9 := &x.SSLEnabled + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } + } + case "protectionDomain": + if r.TryDecodeAsNil() { + x.ProtectionDomain = "" + } else { + yyv11 := &x.ProtectionDomain + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "storagePool": + if r.TryDecodeAsNil() { + x.StoragePool = "" + } else { + yyv13 := &x.StoragePool + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "storageMode": + if r.TryDecodeAsNil() { + x.StorageMode = "" + } else { + yyv15 := &x.StorageMode + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + case "volumeName": + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv17 := &x.VolumeName + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = 
r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv19 := &x.FSType + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv21 := &x.ReadOnly + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleIOVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Gateway = "" + } else { + yyv24 := &x.Gateway + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.System = "" + } else { + yyv26 := &x.System + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SSLEnabled = false + } else { + yyv29 := &x.SSLEnabled + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProtectionDomain = "" + } else { + yyv31 := &x.ProtectionDomain + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StoragePool = "" + } else { + yyv33 := &x.StoragePool + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StorageMode = "" + } else { + yyv35 := &x.StorageMode + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv37 := &x.VolumeName + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv39 := &x.FSType + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv41 := &x.ReadOnly + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*bool)(yyv41)) = r.DecodeBool() + } + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj23-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StorageOSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.VolumeName != "" + yyq2[1] = x.VolumeNamespace != "" + yyq2[2] = x.FSType != "" + yyq2[3] = x.ReadOnly != false + yyq2[4] = x.SecretRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeNamespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeNamespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeNamespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageOSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageOSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + 
z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeName": + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv4 := &x.VolumeName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "volumeNamespace": + if r.TryDecodeAsNil() { + x.VolumeNamespace = "" + } else { + yyv6 := &x.VolumeNamespace + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv8 := &x.FSType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageOSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv14 := &x.VolumeName + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeNamespace = "" + } else { + yyv16 := &x.VolumeNamespace + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv18 := &x.FSType + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv20 := &x.ReadOnly + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*bool)(yyv20)) = r.DecodeBool() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == 
nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StorageOSPersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.VolumeName != "" + yyq2[1] = x.VolumeNamespace != "" + yyq2[2] = x.FSType != "" + yyq2[3] = x.ReadOnly != false + yyq2[4] = x.SecretRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeNamespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeNamespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeNamespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if 
false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageOSPersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageOSPersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeName": + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv4 := &x.VolumeName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "volumeNamespace": + if r.TryDecodeAsNil() { + x.VolumeNamespace = "" + } else { + yyv6 := &x.VolumeNamespace + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv8 := &x.FSType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(ObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageOSPersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, 
r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv14 := &x.VolumeName + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeNamespace = "" + } else { + yyv16 := &x.VolumeNamespace + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv18 := &x.FSType + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv20 := &x.ReadOnly + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*bool)(yyv20)) = r.DecodeBool() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(ObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.DefaultMode != nil + yyq2[3] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy10 := *x.DefaultMode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy12 := *x.DefaultMode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy15 := *x.Optional + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy17 := *x.Optional + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + 
_ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv15 := &x.Items + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv15), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + 
yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ProjectedVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.DefaultMode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Sources == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceVolumeProjection(([]VolumeProjection)(x.Sources), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Sources == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceVolumeProjection(([]VolumeProjection)(x.Sources), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy7 := *x.DefaultMode + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy9 := *x.DefaultMode + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ProjectedVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ProjectedVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "sources": + if r.TryDecodeAsNil() { + x.Sources = nil + } else { + yyv4 := &x.Sources + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceVolumeProjection((*[]VolumeProjection)(yyv4), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ProjectedVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Sources = nil + } else { + yyv9 := &x.Sources + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceVolumeProjection((*[]VolumeProjection)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Secret != nil + yyq2[1] = x.DownwardAPI != nil + yyq2[2] = x.ConfigMap != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secret")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMap")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "secret": + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretProjection) + } + x.Secret.CodecDecodeSelf(d) + } + case "downwardAPI": + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIProjection) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + case "configMap": + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapProjection) + } + x.ConfigMap.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + 
var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretProjection) + } + x.Secret.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIProjection) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapProjection) + } + x.ConfigMap.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.Mode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Mode == nil { + r.EncodeNil() + } else { + yy10 := *x.Mode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + 
r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Mode == nil { + r.EncodeNil() + } else { + yy12 := *x.Mode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "mode": + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *KeyToPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv11 := &x.Key + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LocalVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LocalVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LocalVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LocalVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv7 := &x.Path + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.HostPort != 0 + yyq2[3] = x.Protocol != "" + yyq2[4] = x.HostIP != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.HostPort)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.HostPort)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.ContainerPort)) + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerPort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.ContainerPort)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Protocol.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Protocol.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostPort": + if r.TryDecodeAsNil() { + x.HostPort = 0 + } else { + yyv6 := &x.HostPort + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "containerPort": + if r.TryDecodeAsNil() { + x.ContainerPort = 0 + } else { + yyv8 := &x.ContainerPort + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "protocol": + if r.TryDecodeAsNil() { 
+ x.Protocol = "" + } else { + yyv10 := &x.Protocol + yyv10.CodecDecodeSelf(d) + } + case "hostIP": + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv11 := &x.HostIP + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv14 := &x.Name + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPort = 0 + } else { + yyv16 := &x.HostPort + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int32)(yyv16)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerPort = 0 + } else { + yyv18 := &x.ContainerPort + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int32)(yyv18)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv20 := &x.Protocol + yyv20.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv21 := &x.HostIP + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.ReadOnly != false + yyq2[3] = x.SubPath != "" + var yynn2 int + if yyr2 || yy2arr2 { + 
r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mountPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 
bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv6 := &x.ReadOnly + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "mountPath": + if r.TryDecodeAsNil() { + x.MountPath = "" + } else { + yyv8 := &x.MountPath + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "subPath": + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + yyv10 := &x.SubPath + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MountPath = "" + } else { + yyv17 := &x.MountPath + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + yyv19 := &x.SubPath + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EnvVar) CodecEncodeSelf(e *codec1978.Encoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Value != "" + yyq2[2] = x.ValueFrom != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ValueFrom == nil { + r.EncodeNil() + } else { + x.ValueFrom.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("valueFrom")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ValueFrom == nil { + r.EncodeNil() + } else { + x.ValueFrom.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + 
if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "valueFrom": + if r.TryDecodeAsNil() { + if x.ValueFrom != nil { + x.ValueFrom = nil + } + } else { + if x.ValueFrom == nil { + x.ValueFrom = new(EnvVarSource) + } + x.ValueFrom.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EnvVar) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv12 := &x.Value + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ValueFrom != nil { + x.ValueFrom = nil + } + } else { + if x.ValueFrom == nil { + x.ValueFrom = new(EnvVarSource) + } + x.ValueFrom.CodecDecodeSelf(d) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.FieldRef != nil + yyq2[1] = x.ResourceFieldRef != nil + yyq2[2] = x.ConfigMapKeyRef != nil + yyq2[3] = x.SecretKeyRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ConfigMapKeyRef == nil { + r.EncodeNil() + } else { + x.ConfigMapKeyRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMapKeyRef == nil { + r.EncodeNil() + } else { + x.ConfigMapKeyRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.SecretKeyRef == nil { + r.EncodeNil() + } else { + x.SecretKeyRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretKeyRef == nil { + r.EncodeNil() + } else { + x.SecretKeyRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "fieldRef": + if r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + case "resourceFieldRef": + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + case "configMapKeyRef": + if r.TryDecodeAsNil() { + if x.ConfigMapKeyRef != nil { + x.ConfigMapKeyRef = nil + } + } else { + if x.ConfigMapKeyRef == nil { + x.ConfigMapKeyRef = new(ConfigMapKeySelector) + } + x.ConfigMapKeyRef.CodecDecodeSelf(d) + } + case "secretKeyRef": + if r.TryDecodeAsNil() { + if x.SecretKeyRef != nil { + x.SecretKeyRef = nil + } + } else { + if x.SecretKeyRef == nil { + x.SecretKeyRef = new(SecretKeySelector) + } + x.SecretKeyRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMapKeyRef != nil { + x.ConfigMapKeyRef = nil + } + } else { + if x.ConfigMapKeyRef == nil { + x.ConfigMapKeyRef = new(ConfigMapKeySelector) + } + x.ConfigMapKeyRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretKeyRef != nil { + x.SecretKeyRef = nil + } + } else { + if x.SecretKeyRef == nil { + x.SecretKeyRef = new(SecretKeySelector) + } + x.SecretKeyRef.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectFieldSelector) 
CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv4 := &x.APIVersion + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } 
+ } + case "fieldPath": + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv6 := &x.FieldPath + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv9 := &x.APIVersion + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv11 := &x.FieldPath + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ContainerName != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + 
} + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Divisor + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("divisor")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Divisor + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "containerName": + if r.TryDecodeAsNil() { + x.ContainerName = "" + } else { + yyv4 := &x.ContainerName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "resource": + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv6 := &x.Resource + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "divisor": + if r.TryDecodeAsNil() { + x.Divisor = pkg3_resource.Quantity{} + } else { + yyv8 := &x.Divisor + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + 
yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerName = "" + } else { + yyv11 := &x.ContainerName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv13 := &x.Resource + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Divisor = pkg3_resource.Quantity{} + } else { + yyv15 := &x.Divisor + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if 
x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + 
*((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if 
x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) 
= r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EnvFromSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Prefix != "" + yyq2[1] = x.ConfigMapRef != nil + yyq2[2] = x.SecretRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Prefix)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("prefix")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Prefix)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.ConfigMapRef == nil { + r.EncodeNil() + } else { + x.ConfigMapRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMapRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMapRef == nil { + r.EncodeNil() + } else { + x.ConfigMapRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EnvFromSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = 
yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EnvFromSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "configMapRef": + if r.TryDecodeAsNil() { + if x.ConfigMapRef != nil { + x.ConfigMapRef = nil + } + } else { + if x.ConfigMapRef == nil { + x.ConfigMapRef = new(ConfigMapEnvSource) + } + x.ConfigMapRef.CodecDecodeSelf(d) + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(SecretEnvSource) + } + x.SecretRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EnvFromSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv9 := &x.Prefix + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMapRef != nil { + x.ConfigMapRef = nil + } + } else { + if x.ConfigMapRef == nil { + x.ConfigMapRef = new(ConfigMapEnvSource) + } + x.ConfigMapRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(SecretEnvSource) + } + x.SecretRef.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = 
r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapEnvSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy7 := *x.Optional + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy9 := *x.Optional + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapEnvSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapEnvSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapEnvSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretEnvSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy7 := *x.Optional + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy9 := *x.Optional + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretEnvSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretEnvSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretEnvSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; 
; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv11 := &x.Value + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[2] = x.Host != "" + yyq2[3] = x.Scheme != "" + yyq2[4] = len(x.HTTPHeaders) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Port + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Port + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("host")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Scheme.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scheme")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Scheme.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.HTTPHeaders == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("httpHeaders")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HTTPHeaders == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPGetAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var 
yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "port": + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv6 := &x.Port + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "host": + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv8 := &x.Host + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "scheme": + if r.TryDecodeAsNil() { + x.Scheme = "" + } else { + yyv10 := &x.Scheme + yyv10.CodecDecodeSelf(d) + } + case "httpHeaders": + if r.TryDecodeAsNil() { + x.HTTPHeaders = nil + } else { + yyv11 := &x.HTTPHeaders + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceHTTPHeader((*[]HTTPHeader)(yyv11), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv14 := &x.Path + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv16 := &x.Port + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv16) + } else { + z.DecFallback(yyv16, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv18 := &x.Host + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Scheme = "" + } else { + yyv20 := &x.Scheme + 
yyv20.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HTTPHeaders = nil + } else { + yyv21 := &x.HTTPHeaders + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + h.decSliceHTTPHeader((*[]HTTPHeader)(yyv21), d) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x URIScheme) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *URIScheme) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *TCPSocketAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Host != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Port + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Port + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("host")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TCPSocketAction) CodecDecodeSelf(d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "port": + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv4 := &x.Port + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + case "host": + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv6 := &x.Host + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv9 := &x.Port + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv11 := &x.Host + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Command) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Command == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("command")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ExecAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "command": + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv4 := &x.Command + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv7 := &x.Command + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + z.F.DecSliceStringX(yyv7, false, d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Handler.Exec != nil && x.Exec != nil + yyq2[1] = x.Handler.HTTPGet != nil && x.HTTPGet != nil + yyq2[2] = x.Handler.TCPSocket != nil && x.TCPSocket != nil + yyq2[3] = x.InitialDelaySeconds != 0 + yyq2[4] = x.TimeoutSeconds != 0 + yyq2[5] = x.PeriodSeconds != 0 + yyq2[6] = x.SuccessThreshold != 0 + yyq2[7] = x.FailureThreshold != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + var yyn3 bool + if x.Handler.Exec == nil { + yyn3 = true + goto LABEL3 + } + LABEL3: + if yyr2 || yy2arr2 { + if yyn3 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("exec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn3 { + r.EncodeNil() + } else { + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } + } + } + var yyn6 bool + if x.Handler.HTTPGet == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("httpGet")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } + } + } + var yyn9 bool + if x.Handler.TCPSocket == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn9 { + r.EncodeNil() + } else { + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + 
} + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.InitialDelaySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initialDelaySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.InitialDelaySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.TimeoutSeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.TimeoutSeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.PeriodSeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("periodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.PeriodSeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.SuccessThreshold)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("successThreshold")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.SuccessThreshold)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeInt(int64(x.FailureThreshold)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failureThreshold")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(x.FailureThreshold)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Probe) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := 
r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "exec": + if x.Handler.Exec == nil { + x.Handler.Exec = new(ExecAction) + } + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + case "httpGet": + if x.Handler.HTTPGet == nil { + x.Handler.HTTPGet = new(HTTPGetAction) + } + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + case "tcpSocket": + if x.Handler.TCPSocket == nil { + x.Handler.TCPSocket = new(TCPSocketAction) + } + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + case "initialDelaySeconds": + if r.TryDecodeAsNil() { + x.InitialDelaySeconds = 0 + } else { + yyv7 := &x.InitialDelaySeconds + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "timeoutSeconds": + if r.TryDecodeAsNil() { + x.TimeoutSeconds = 0 + } else { + yyv9 := &x.TimeoutSeconds + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(yyv9)) = int32(r.DecodeInt(32)) + } + } + case "periodSeconds": + if r.TryDecodeAsNil() { + x.PeriodSeconds = 0 + } else { + yyv11 := &x.PeriodSeconds + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + case "successThreshold": + if r.TryDecodeAsNil() { + x.SuccessThreshold = 0 + } else { + yyv13 := &x.SuccessThreshold + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int32)(yyv13)) = int32(r.DecodeInt(32)) + } + } + case "failureThreshold": + if r.TryDecodeAsNil() { + x.FailureThreshold = 0 + } else { + yyv15 := &x.FailureThreshold + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(yyv15)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 + if x.Handler.Exec == nil { + x.Handler.Exec = new(ExecAction) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if 
x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + if x.Handler.HTTPGet == nil { + x.Handler.HTTPGet = new(HTTPGetAction) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + if x.Handler.TCPSocket == nil { + x.Handler.TCPSocket = new(TCPSocketAction) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitialDelaySeconds = 0 + } else { + yyv21 := &x.InitialDelaySeconds + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TimeoutSeconds = 0 + } else { + yyv23 := &x.TimeoutSeconds + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PeriodSeconds = 0 + } else { + yyv25 := &x.PeriodSeconds + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SuccessThreshold = 0 + } else { + yyv27 := &x.SuccessThreshold + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FailureThreshold = 0 + } else { + yyv29 := &x.FailureThreshold + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int32)(yyv29)) = int32(r.DecodeInt(32)) + } + } + for { + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj17-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PullPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PullPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x TerminationMessagePolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TerminationMessagePolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x Capability) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *Capability) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Add) != 0 + yyq2[1] = len(x.Drop) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Add == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Add), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("add")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Add == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Add), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Drop == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Drop), e) + } + } + } else { + 
r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("drop")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Drop == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Drop), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Capabilities) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Capabilities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "add": + if r.TryDecodeAsNil() { + x.Add = nil + } else { + yyv4 := &x.Add + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv4), d) + } + } + case "drop": + if r.TryDecodeAsNil() { + x.Drop = nil + } else { + yyv6 := &x.Drop + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Add = nil + } else { + yyv9 := &x.Add + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Drop = nil + } else { + yyv11 := &x.Drop + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv11), d) + } + } + for { + 
yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Limits) != 0 + yyq2[1] = len(x.Requests) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Limits == nil { + r.EncodeNil() + } else { + x.Limits.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("limits")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Limits == nil { + r.EncodeNil() + } else { + x.Limits.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Requests == nil { + r.EncodeNil() + } else { + x.Requests.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requests")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Requests == nil { + r.EncodeNil() + } else { + x.Requests.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceRequirements) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + 
switch yys3 { + case "limits": + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv4 := &x.Limits + yyv4.CodecDecodeSelf(d) + } + case "requests": + if r.TryDecodeAsNil() { + x.Requests = nil + } else { + yyv5 := &x.Requests + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv7 := &x.Limits + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Requests = nil + } else { + yyv8 := &x.Requests + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [20]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = len(x.Command) != 0 + yyq2[3] = len(x.Args) != 0 + yyq2[4] = x.WorkingDir != "" + yyq2[5] = len(x.Ports) != 0 + yyq2[6] = len(x.EnvFrom) != 0 + yyq2[7] = len(x.Env) != 0 + yyq2[8] = true + yyq2[9] = len(x.VolumeMounts) != 0 + yyq2[10] = x.LivenessProbe != nil + yyq2[11] = x.ReadinessProbe != nil + yyq2[12] = x.Lifecycle != nil + yyq2[13] = x.TerminationMessagePath != "" + yyq2[14] = x.TerminationMessagePolicy != "" + yyq2[15] = x.ImagePullPolicy != "" + yyq2[16] = x.SecurityContext != nil + yyq2[17] = x.Stdin != false + yyq2[18] = x.StdinOnce != false + yyq2[19] = x.TTY != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(20) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("image")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Command == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("command")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Args == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.Args, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("args")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Args == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.Args, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("workingDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.EnvFrom == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceEnvFromSource(([]EnvFromSource)(x.EnvFrom), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("envFrom")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.EnvFrom == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if 
false { + } else { + h.encSliceEnvFromSource(([]EnvFromSource)(x.EnvFrom), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.Env == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + h.encSliceEnvVar(([]EnvVar)(x.Env), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("env")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Env == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + h.encSliceEnvVar(([]EnvVar)(x.Env), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yy28 := &x.Resources + yy28.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy30 := &x.Resources + yy30.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.VolumeMounts == nil { + r.EncodeNil() + } else { + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeMounts")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumeMounts == nil { + r.EncodeNil() + } else { + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.LivenessProbe == nil { + r.EncodeNil() + } else { + x.LivenessProbe.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("livenessProbe")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LivenessProbe == nil { + r.EncodeNil() + } else { + x.LivenessProbe.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.ReadinessProbe == nil { + r.EncodeNil() + } else { + x.ReadinessProbe.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readinessProbe")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ReadinessProbe == nil { + r.EncodeNil() + } else { + x.ReadinessProbe.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.Lifecycle == nil { + r.EncodeNil() + } else { + x.Lifecycle.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lifecycle")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Lifecycle == nil { + r.EncodeNil() + } else { + x.Lifecycle.CodecEncodeSelf(e) + } + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym46 := z.EncBinary() + _ = yym46 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + x.TerminationMessagePolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.TerminationMessagePolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + x.ImagePullPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imagePullPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.ImagePullPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("securityContext")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + yym57 := z.EncBinary() + _ = yym57 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdin")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym58 := z.EncBinary() + _ = yym58 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + yym60 := z.EncBinary() + _ = yym60 + if false { + } else { + r.EncodeBool(bool(x.StdinOnce)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdinOnce")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym61 := z.EncBinary() + _ = yym61 + if false { + } else { + r.EncodeBool(bool(x.StdinOnce)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + yym63 := z.EncBinary() + _ = yym63 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[19] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tty")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym64 := z.EncBinary() + _ = yym64 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Container) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "image": + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv6 := &x.Image + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "command": + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv8 := &x.Command + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "args": + if r.TryDecodeAsNil() { + x.Args = nil + } else { + yyv10 := &x.Args + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecSliceStringX(yyv10, false, d) + } + } + case "workingDir": + if r.TryDecodeAsNil() { + x.WorkingDir = "" + } else { + yyv12 := &x.WorkingDir + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv14 := &x.Ports + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceContainerPort((*[]ContainerPort)(yyv14), d) + } + } + case "envFrom": + if r.TryDecodeAsNil() { + x.EnvFrom = nil + } else { + yyv16 := &x.EnvFrom + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceEnvFromSource((*[]EnvFromSource)(yyv16), d) + } + } + case "env": + if r.TryDecodeAsNil() { + x.Env = nil + } else { + yyv18 := &x.Env + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + h.decSliceEnvVar((*[]EnvVar)(yyv18), d) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv20 := &x.Resources + 
yyv20.CodecDecodeSelf(d) + } + case "volumeMounts": + if r.TryDecodeAsNil() { + x.VolumeMounts = nil + } else { + yyv21 := &x.VolumeMounts + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + h.decSliceVolumeMount((*[]VolumeMount)(yyv21), d) + } + } + case "livenessProbe": + if r.TryDecodeAsNil() { + if x.LivenessProbe != nil { + x.LivenessProbe = nil + } + } else { + if x.LivenessProbe == nil { + x.LivenessProbe = new(Probe) + } + x.LivenessProbe.CodecDecodeSelf(d) + } + case "readinessProbe": + if r.TryDecodeAsNil() { + if x.ReadinessProbe != nil { + x.ReadinessProbe = nil + } + } else { + if x.ReadinessProbe == nil { + x.ReadinessProbe = new(Probe) + } + x.ReadinessProbe.CodecDecodeSelf(d) + } + case "lifecycle": + if r.TryDecodeAsNil() { + if x.Lifecycle != nil { + x.Lifecycle = nil + } + } else { + if x.Lifecycle == nil { + x.Lifecycle = new(Lifecycle) + } + x.Lifecycle.CodecDecodeSelf(d) + } + case "terminationMessagePath": + if r.TryDecodeAsNil() { + x.TerminationMessagePath = "" + } else { + yyv26 := &x.TerminationMessagePath + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + case "terminationMessagePolicy": + if r.TryDecodeAsNil() { + x.TerminationMessagePolicy = "" + } else { + yyv28 := &x.TerminationMessagePolicy + yyv28.CodecDecodeSelf(d) + } + case "imagePullPolicy": + if r.TryDecodeAsNil() { + x.ImagePullPolicy = "" + } else { + yyv29 := &x.ImagePullPolicy + yyv29.CodecDecodeSelf(d) + } + case "securityContext": + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(SecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + case "stdin": + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv31 := &x.Stdin + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + case "stdinOnce": + if r.TryDecodeAsNil() { + x.StdinOnce = false + } else { + yyv33 := &x.StdinOnce + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } + } + case "tty": + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv35 := &x.TTY + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*bool)(yyv35)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj37 int + var yyb37 bool + var yyhl37 bool = l >= 0 + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv38 := &x.Name + yym39 := z.DecBinary() + _ = yym39 + if false { + } else { + *((*string)(yyv38)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv40 := &x.Image + yym41 := z.DecBinary() + _ = yym41 + if false { + } else { + *((*string)(yyv40)) = r.DecodeString() + } + } + yyj37++ + 
if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv42 := &x.Command + yym43 := z.DecBinary() + _ = yym43 + if false { + } else { + z.F.DecSliceStringX(yyv42, false, d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Args = nil + } else { + yyv44 := &x.Args + yym45 := z.DecBinary() + _ = yym45 + if false { + } else { + z.F.DecSliceStringX(yyv44, false, d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.WorkingDir = "" + } else { + yyv46 := &x.WorkingDir + yym47 := z.DecBinary() + _ = yym47 + if false { + } else { + *((*string)(yyv46)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv48 := &x.Ports + yym49 := z.DecBinary() + _ = yym49 + if false { + } else { + h.decSliceContainerPort((*[]ContainerPort)(yyv48), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EnvFrom = nil + } else { + yyv50 := &x.EnvFrom + yym51 := z.DecBinary() + _ = yym51 + if false { + } else { + h.decSliceEnvFromSource((*[]EnvFromSource)(yyv50), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Env = nil + } else { + yyv52 := &x.Env + yym53 := z.DecBinary() + _ = yym53 + if false { + } else { + h.decSliceEnvVar((*[]EnvVar)(yyv52), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv54 := &x.Resources + yyv54.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeMounts = nil + } else { + yyv55 := &x.VolumeMounts + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + h.decSliceVolumeMount((*[]VolumeMount)(yyv55), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LivenessProbe != nil { + x.LivenessProbe = nil + } + } else { + if x.LivenessProbe == nil { + x.LivenessProbe = new(Probe) + } + x.LivenessProbe.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ReadinessProbe != nil { + x.ReadinessProbe = nil + } + } else { + if x.ReadinessProbe == nil { + x.ReadinessProbe = new(Probe) + } + x.ReadinessProbe.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Lifecycle != nil { + x.Lifecycle = nil + } + } else { + if x.Lifecycle == nil { + x.Lifecycle = new(Lifecycle) + } + x.Lifecycle.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TerminationMessagePath = "" + } else { + yyv60 := &x.TerminationMessagePath + yym61 := z.DecBinary() + _ = yym61 + if false { + } else { + *((*string)(yyv60)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TerminationMessagePolicy = "" + } else { + yyv62 := &x.TerminationMessagePolicy + yyv62.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImagePullPolicy = "" + } else { + yyv63 := &x.ImagePullPolicy + yyv63.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(SecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv65 := &x.Stdin + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + *((*bool)(yyv65)) = r.DecodeBool() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StdinOnce = false + } else { + yyv67 := &x.StdinOnce + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + 
*((*bool)(yyv67)) = r.DecodeBool() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv69 := &x.TTY + yym70 := z.DecBinary() + _ = yym70 + if false { + } else { + *((*bool)(yyv69)) = r.DecodeBool() + } + } + for { + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj37-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Exec != nil + yyq2[1] = x.HTTPGet != nil + yyq2[2] = x.TCPSocket != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("exec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("httpGet")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Handler) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "exec": + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + case "httpGet": + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + case "tcpSocket": + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PostStart != nil + yyq2[1] = x.PreStop != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PostStart == nil { + r.EncodeNil() + } else { + x.PostStart.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("postStart")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PostStart == nil { + r.EncodeNil() + } else { + x.PostStart.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreStop == nil { + r.EncodeNil() + } else { + x.PreStop.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preStop")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreStop == nil { + r.EncodeNil() + } else { + x.PreStop.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Lifecycle) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "postStart": + if r.TryDecodeAsNil() { + if x.PostStart != nil { + x.PostStart = nil + } + } else { + if x.PostStart == nil { + x.PostStart = new(Handler) + } + x.PostStart.CodecDecodeSelf(d) + } + case "preStop": + if r.TryDecodeAsNil() { + if x.PreStop != nil { + x.PreStop = nil + } + } else { + if x.PreStop == nil { + x.PreStop = new(Handler) + } + 
x.PreStop.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PostStart != nil { + x.PostStart = nil + } + } else { + if x.PostStart == nil { + x.PostStart = new(Handler) + } + x.PostStart.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PreStop != nil { + x.PreStop = nil + } + } else { + if x.PreStop == nil { + x.PreStop = new(Handler) + } + x.PreStop.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Reason != "" + yyq2[1] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = 
yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStateWaiting) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv4 := &x.Reason + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv9 := &x.Reason + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv11 := &x.Message + yym12 := 
z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerStateRunning) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.StartedAt + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if yym5 { + z.EncBinaryMarshal(yy4) + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startedAt")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.StartedAt + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if yym7 { + z.EncBinaryMarshal(yy6) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStateRunning) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "startedAt": + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } 
else { + yyv4 := &x.StartedAt + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if yym5 { + z.DecBinaryUnmarshal(yyv4) + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStateRunning) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv7 := &x.StartedAt + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if yym8 { + z.DecBinaryUnmarshal(yyv7) + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Signal != 0 + yyq2[2] = x.Reason != "" + yyq2[3] = x.Message != "" + yyq2[4] = true + yyq2[5] = true + yyq2[6] = x.ContainerID != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ExitCode)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("exitCode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ExitCode)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Signal)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("signal")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Signal)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } 
+ } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy16 := &x.StartedAt + yym17 := z.EncBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.EncExt(yy16) { + } else if yym17 { + z.EncBinaryMarshal(yy16) + } else if !yym17 && z.IsJSONHandle() { + z.EncJSONMarshal(yy16) + } else { + z.EncFallback(yy16) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startedAt")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy18 := &x.StartedAt + yym19 := z.EncBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.EncExt(yy18) { + } else if yym19 { + z.EncBinaryMarshal(yy18) + } else if !yym19 && z.IsJSONHandle() { + z.EncJSONMarshal(yy18) + } else { + z.EncFallback(yy18) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yy21 := &x.FinishedAt + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(yy21) { + } else if yym22 { + z.EncBinaryMarshal(yy21) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(yy21) + } else { + z.EncFallback(yy21) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finishedAt")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy23 := &x.FinishedAt + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if z.HasExtensions() && z.EncExt(yy23) { + } else if yym24 { + z.EncBinaryMarshal(yy23) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(yy23) + } else { + z.EncFallback(yy23) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStateTerminated) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "exitCode": + if r.TryDecodeAsNil() { + x.ExitCode = 0 + } else { + yyv4 := &x.ExitCode + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "signal": + if r.TryDecodeAsNil() { + x.Signal = 0 + } else { + yyv6 := &x.Signal + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "startedAt": + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv12 := &x.StartedAt + yym13 := z.DecBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.DecExt(yyv12) { + } else if yym13 { + z.DecBinaryUnmarshal(yyv12) + } else if !yym13 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv12) + } else { + z.DecFallback(yyv12, false) + } + } + case "finishedAt": + if r.TryDecodeAsNil() { + x.FinishedAt = pkg2_v1.Time{} + } else { + yyv14 := &x.FinishedAt + yym15 := z.DecBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.DecExt(yyv14) { + } else if yym15 { + z.DecBinaryUnmarshal(yyv14) + } else if !yym15 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv14) + } else { + z.DecFallback(yyv14, false) + } + } + case "containerID": + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv16 := &x.ContainerID + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExitCode = 0 + } else { + yyv19 := &x.ExitCode + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Signal = 0 + } else { + yyv21 := &x.Signal + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv23 := &x.Reason + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv25 := &x.Message + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv27 := &x.StartedAt + yym28 := z.DecBinary() + _ = yym28 + if false { + } else if z.HasExtensions() && z.DecExt(yyv27) { + } else if yym28 { + z.DecBinaryUnmarshal(yyv27) + } else if !yym28 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv27) + } else { + z.DecFallback(yyv27, false) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FinishedAt = pkg2_v1.Time{} + } else { + yyv29 := &x.FinishedAt + yym30 := z.DecBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.DecExt(yyv29) { + } else if yym30 { + z.DecBinaryUnmarshal(yyv29) + } else if !yym30 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv29) + } else { + z.DecFallback(yyv29, false) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv31 := &x.ContainerID + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Waiting != nil + yyq2[1] = x.Running != nil + yyq2[2] = x.Terminated != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Waiting == nil { + r.EncodeNil() + } else { + x.Waiting.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("waiting")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Waiting == nil { + r.EncodeNil() + } else { + x.Waiting.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Running == nil { + r.EncodeNil() + } else { + x.Running.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("running")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Running == nil { + r.EncodeNil() + } else { + x.Running.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Terminated == nil { + r.EncodeNil() + } else { + x.Terminated.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminated")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Terminated == nil { + r.EncodeNil() + } else { + x.Terminated.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerState) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, 
_ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "waiting": + if r.TryDecodeAsNil() { + if x.Waiting != nil { + x.Waiting = nil + } + } else { + if x.Waiting == nil { + x.Waiting = new(ContainerStateWaiting) + } + x.Waiting.CodecDecodeSelf(d) + } + case "running": + if r.TryDecodeAsNil() { + if x.Running != nil { + x.Running = nil + } + } else { + if x.Running == nil { + x.Running = new(ContainerStateRunning) + } + x.Running.CodecDecodeSelf(d) + } + case "terminated": + if r.TryDecodeAsNil() { + if x.Terminated != nil { + x.Terminated = nil + } + } else { + if x.Terminated == nil { + x.Terminated = new(ContainerStateTerminated) + } + x.Terminated.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Waiting != nil { + x.Waiting = nil + } + } else { + if x.Waiting == nil { + x.Waiting = new(ContainerStateWaiting) + } + x.Waiting.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Running != nil { + x.Running = nil + } + } else { + if x.Running == nil { + x.Running = new(ContainerStateRunning) + } + x.Running.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Terminated != nil { + x.Terminated = nil + } + } else { + if x.Terminated == nil { + x.Terminated = new(ContainerStateTerminated) + } + x.Terminated.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = true + yyq2[2] = true + yyq2[7] = x.ContainerID != "" + var yynn2 int + if yyr2 || 
yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy7 := &x.State + yy7.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("state")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.State + yy9.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy12 := &x.LastTerminationState + yy12.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastState")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.LastTerminationState + yy14.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Ready)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ready")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(x.Ready)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.RestartCount)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("restartCount")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(x.RestartCount)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("image")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imageID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.ImageID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "state": + if r.TryDecodeAsNil() { + x.State = ContainerState{} + } else { + yyv6 := &x.State + yyv6.CodecDecodeSelf(d) + } + case "lastState": + if r.TryDecodeAsNil() { + x.LastTerminationState = ContainerState{} + } else { + yyv7 := &x.LastTerminationState + yyv7.CodecDecodeSelf(d) + } + case "ready": + if r.TryDecodeAsNil() { + x.Ready = false + } else { + yyv8 := &x.Ready + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "restartCount": + if r.TryDecodeAsNil() { + x.RestartCount = 0 + } else { + yyv10 := &x.RestartCount + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "image": + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv12 := &x.Image + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "imageID": + if r.TryDecodeAsNil() { + x.ImageID = "" + } else { + yyv14 := &x.ImageID + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "containerID": + if 
r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv16 := &x.ContainerID + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv19 := &x.Name + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.State = ContainerState{} + } else { + yyv21 := &x.State + yyv21.CodecDecodeSelf(d) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTerminationState = ContainerState{} + } else { + yyv22 := &x.LastTerminationState + yyv22.CodecDecodeSelf(d) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ready = false + } else { + yyv23 := &x.Ready + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RestartCount = 0 + } else { + yyv25 := &x.RestartCount + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv27 := &x.Image + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImageID = "" + } else { + yyv29 := &x.ImageID + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + 
return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv31 := &x.ContainerID + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PodPhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PodPhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x PodConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PodConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } 
else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { 
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastProbeTime": + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg2_v1.Time{} + } else { + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg2_v1.Time{} + } else { + yyv17 := &x.LastProbeTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() 
&& z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x DNSPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DNSPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = 
yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.NodeSelectorTerms == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeSelectorTerms")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeSelectorTerms == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "nodeSelectorTerms": + if r.TryDecodeAsNil() { + x.NodeSelectorTerms = nil + } else { + yyv4 := &x.NodeSelectorTerms + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeSelectorTerms = nil + } else { + yyv7 := &x.NodeSelectorTerms + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv7), d) + } + } + for 
{ + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.MatchExpressions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MatchExpressions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSelectorTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "matchExpressions": + if r.TryDecodeAsNil() { + x.MatchExpressions = nil + } else { + yyv4 := &x.MatchExpressions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSelectorTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MatchExpressions = nil + } else { + yyv7 := &x.MatchExpressions + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = len(x.Values) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Operator.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operator")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Operator.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Values == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Values, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("values")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Values == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Values, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r 
+ yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "operator": + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv6 := &x.Operator + yyv6.CodecDecodeSelf(d) + } + case "values": + if r.TryDecodeAsNil() { + x.Values = nil + } else { + yyv7 := &x.Values + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + z.F.DecSliceStringX(yyv7, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv10 := &x.Key + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv12 := &x.Operator + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Values = nil + } else { + yyv13 := &x.Values + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NodeSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodeSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.NodeAffinity != nil + yyq2[1] = x.PodAffinity != nil + yyq2[2] = x.PodAntiAffinity != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.NodeAffinity == nil { + r.EncodeNil() + } else { + x.NodeAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeAffinity == nil { + r.EncodeNil() + } else { + x.NodeAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Affinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + 
yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "nodeAffinity": + if r.TryDecodeAsNil() { + if x.NodeAffinity != nil { + x.NodeAffinity = nil + } + } else { + if x.NodeAffinity == nil { + x.NodeAffinity = new(NodeAffinity) + } + x.NodeAffinity.CodecDecodeSelf(d) + } + case "podAffinity": + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + case "podAntiAffinity": + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NodeAffinity != nil { + x.NodeAffinity = nil + } + } else { + if x.NodeAffinity == nil { + x.NodeAffinity = new(NodeAffinity) + } + x.NodeAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + 
var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if 
r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.PodAffinityTerm + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.PodAffinityTerm + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv4 := &x.Weight + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "podAffinityTerm": + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv6 := &x.PodAffinityTerm + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv8 := &x.Weight + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv10 := &x.PodAffinityTerm + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.LabelSelector != nil + yyq2[1] = len(x.Namespaces) != 0 + yyq2[2] = x.TopologyKey != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaces")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + 
var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "labelSelector": + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_v1.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + case "namespaces": + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv6 := &x.Namespaces + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "topologyKey": + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + yyv8 := &x.TopologyKey + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_v1.LabelSelector) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv13 := &x.Namespaces + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = 
r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + yyv15 := &x.TopologyKey + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + } else { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) + } + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + } else { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) + } + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Preference + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preference")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Preference + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv4 := &x.Weight + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "preference": + if r.TryDecodeAsNil() { + x.Preference = NodeSelectorTerm{} + } else { + yyv6 := &x.Preference + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, 
yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv8 := &x.Weight + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Preference = NodeSelectorTerm{} + } else { + yyv10 := &x.Preference + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Value != "" + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Effect.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("effect")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Effect.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy13 := &x.TimeAdded + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if yym14 { + z.EncBinaryMarshal(yy13) + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) + } else { + z.EncFallback(yy13) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timeAdded")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy15 := &x.TimeAdded + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "effect": + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + yyv8 := &x.Effect + yyv8.CodecDecodeSelf(d) + } + case "timeAdded": + if r.TryDecodeAsNil() { + x.TimeAdded = pkg2_v1.Time{} + } else { + yyv9 := &x.TimeAdded + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if yym10 { + z.DecBinaryUnmarshal(yyv9) + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv12 := &x.Key + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv14 := &x.Value + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + yyv16 := &x.Effect + yyv16.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TimeAdded = pkg2_v1.Time{} + } else { + yyv17 := &x.TimeAdded + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Key != "" + yyq2[1] = x.Operator != "" + yyq2[2] = x.Value != "" + yyq2[3] = x.Effect != "" + yyq2[4] = x.TolerationSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + x.Operator.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operator")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Operator.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Effect.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("effect")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Effect.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.TolerationSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.TolerationSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tolerationSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TolerationSeconds == nil { + r.EncodeNil() + } else { + yy18 := *x.TolerationSeconds + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(yy18)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "operator": + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv6 := &x.Operator + yyv6.CodecDecodeSelf(d) + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv7 := &x.Value + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "effect": + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + yyv9 := &x.Effect + yyv9.CodecDecodeSelf(d) + } + case "tolerationSeconds": + if r.TryDecodeAsNil() { + if x.TolerationSeconds != nil { + x.TolerationSeconds = nil + } + } else { + if x.TolerationSeconds == nil { + x.TolerationSeconds = new(int64) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int64)(x.TolerationSeconds)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv15 := &x.Operator + yyv15.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv16 := &x.Value + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { 
+ x.Effect = "" + } else { + yyv18 := &x.Effect + yyv18.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TolerationSeconds != nil { + x.TolerationSeconds = nil + } + } else { + if x.TolerationSeconds == nil { + x.TolerationSeconds = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.TolerationSeconds)) = int64(r.DecodeInt(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [23]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Volumes) != 0 + yyq2[1] = len(x.InitContainers) != 0 + yyq2[3] = x.RestartPolicy != "" + yyq2[4] = x.TerminationGracePeriodSeconds != nil + yyq2[5] = x.ActiveDeadlineSeconds != nil + yyq2[6] = x.DNSPolicy != "" + yyq2[7] = len(x.NodeSelector) != 0 + yyq2[8] = x.ServiceAccountName != "" + yyq2[9] = x.DeprecatedServiceAccount != "" + yyq2[10] = x.AutomountServiceAccountToken != nil + yyq2[11] = x.NodeName != "" + yyq2[12] = x.HostNetwork != false + yyq2[13] = x.HostPID != false + yyq2[14] = x.HostIPC != false + yyq2[15] = x.SecurityContext != nil + yyq2[16] = len(x.ImagePullSecrets) != 0 + yyq2[17] = x.Hostname != "" + yyq2[18] = x.Subdomain != "" + yyq2[19] = x.Affinity != nil + yyq2[20] = x.SchedulerName != "" + yyq2[21] = len(x.Tolerations) != 0 + yyq2[22] = len(x.HostAliases) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(23) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Volumes == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceVolume(([]Volume)(x.Volumes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Volumes == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = 
yym5 + if false { + } else { + h.encSliceVolume(([]Volume)(x.Volumes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.InitContainers == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceContainer(([]Container)(x.InitContainers), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initContainers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.InitContainers == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceContainer(([]Container)(x.InitContainers), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Containers == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceContainer(([]Container)(x.Containers), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Containers == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSliceContainer(([]Container)(x.Containers), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.RestartPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("restartPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.RestartPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.TerminationGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.TerminationGracePeriodSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TerminationGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy18 := *x.TerminationGracePeriodSeconds + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(yy18)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy21 := *x.ActiveDeadlineSeconds + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(yy21)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy23 := *x.ActiveDeadlineSeconds + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeInt(int64(yy23)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + 
x.DNSPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("dnsPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.DNSPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.NodeSelector == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + z.F.EncMapStringStringV(x.NodeSelector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeSelector == nil { + r.EncodeNil() + } else { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + z.F.EncMapStringStringV(x.NodeSelector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym35 := z.EncBinary() + _ = yym35 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceAccount")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy38 := *x.AutomountServiceAccountToken + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeBool(bool(yy38)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("automountServiceAccountToken")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy40 := *x.AutomountServiceAccountToken + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeBool(bool(yy40)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + yym43 := z.EncBinary() + _ = yym43 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("nodeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + yym46 := z.EncBinary() + _ = yym46 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + yym49 := z.EncBinary() + _ = yym49 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym50 := z.EncBinary() + _ = yym50 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + yym52 := z.EncBinary() + _ = yym52 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym53 := z.EncBinary() + _ = yym53 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("securityContext")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym58 := z.EncBinary() + _ = yym58 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym59 := z.EncBinary() + _ = yym59 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + yym61 := z.EncBinary() + _ = yym61 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[17] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym62 := z.EncBinary() + _ = yym62 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + yym64 := z.EncBinary() + _ = yym64 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subdomain")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym65 := z.EncBinary() + _ = yym65 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.Affinity == nil { + r.EncodeNil() + } else { + x.Affinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("affinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Affinity == nil { + r.EncodeNil() + } else { + x.Affinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + yym70 := z.EncBinary() + _ = yym70 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("schedulerName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym71 := z.EncBinary() + _ = yym71 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.Tolerations == nil { + r.EncodeNil() + } else { + yym73 := z.EncBinary() + _ = yym73 + if false { + } else { + h.encSliceToleration(([]Toleration)(x.Tolerations), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tolerations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Tolerations == nil { + r.EncodeNil() + } else { + yym74 := z.EncBinary() + _ = yym74 + if false { + } else { + h.encSliceToleration(([]Toleration)(x.Tolerations), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[22] { + if x.HostAliases == nil { + r.EncodeNil() + } else { + yym76 := z.EncBinary() + _ = yym76 + if false { + } else { + h.encSliceHostAlias(([]HostAlias)(x.HostAliases), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[22] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostMappings")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HostAliases == nil { + r.EncodeNil() + } else { + yym77 := z.EncBinary() + _ = yym77 + if false { + } else { + h.encSliceHostAlias(([]HostAlias)(x.HostAliases), e) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumes": + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv4 := &x.Volumes + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceVolume((*[]Volume)(yyv4), d) + } + } + case "initContainers": + if r.TryDecodeAsNil() { + x.InitContainers = nil + } else { + yyv6 := &x.InitContainers + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceContainer((*[]Container)(yyv6), d) + } + } + case "containers": + if r.TryDecodeAsNil() { + x.Containers = nil + } else { + yyv8 := &x.Containers + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSliceContainer((*[]Container)(yyv8), d) + } + } + case "restartPolicy": + if r.TryDecodeAsNil() { + x.RestartPolicy = "" + } else { + yyv10 := &x.RestartPolicy + yyv10.CodecDecodeSelf(d) + } + case "terminationGracePeriodSeconds": + if r.TryDecodeAsNil() { + if x.TerminationGracePeriodSeconds != nil { + x.TerminationGracePeriodSeconds = nil + } + } else { + if x.TerminationGracePeriodSeconds == nil { + x.TerminationGracePeriodSeconds = new(int64) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + case "activeDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "dnsPolicy": + if r.TryDecodeAsNil() { + x.DNSPolicy = "" + } else { + yyv15 := &x.DNSPolicy + yyv15.CodecDecodeSelf(d) + } + case "nodeSelector": + if r.TryDecodeAsNil() { + x.NodeSelector = nil + } else { + yyv16 := &x.NodeSelector + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecMapStringStringX(yyv16, false, d) + } + } + case "serviceAccountName": + if r.TryDecodeAsNil() { + x.ServiceAccountName = "" + } else { + yyv18 := &x.ServiceAccountName + yym19 := 
z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + case "serviceAccount": + if r.TryDecodeAsNil() { + x.DeprecatedServiceAccount = "" + } else { + yyv20 := &x.DeprecatedServiceAccount + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "automountServiceAccountToken": + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + case "nodeName": + if r.TryDecodeAsNil() { + x.NodeName = "" + } else { + yyv24 := &x.NodeName + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + case "hostNetwork": + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv26 := &x.HostNetwork + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*bool)(yyv26)) = r.DecodeBool() + } + } + case "hostPID": + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv28 := &x.HostPID + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*bool)(yyv28)) = r.DecodeBool() + } + } + case "hostIPC": + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv30 := &x.HostIPC + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*bool)(yyv30)) = r.DecodeBool() + } + } + case "securityContext": + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(PodSecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + case "imagePullSecrets": + if r.TryDecodeAsNil() { + x.ImagePullSecrets = nil + } else { + yyv33 := &x.ImagePullSecrets + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv33), d) + } + } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv35 := &x.Hostname + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + case "subdomain": + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + yyv37 := &x.Subdomain + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + case "affinity": + if r.TryDecodeAsNil() { + if x.Affinity != nil { + x.Affinity = nil + } + } else { + if x.Affinity == nil { + x.Affinity = new(Affinity) + } + x.Affinity.CodecDecodeSelf(d) + } + case "schedulerName": + if r.TryDecodeAsNil() { + x.SchedulerName = "" + } else { + yyv40 := &x.SchedulerName + yym41 := z.DecBinary() + _ = yym41 + if false { + } else { + *((*string)(yyv40)) = r.DecodeString() + } + } + case "tolerations": + if r.TryDecodeAsNil() { + x.Tolerations = nil + } else { + yyv42 := &x.Tolerations + yym43 := z.DecBinary() + _ = yym43 + if false { + } else { + h.decSliceToleration((*[]Toleration)(yyv42), d) + } + } + case "hostMappings": + if r.TryDecodeAsNil() { + x.HostAliases = nil + } else { + yyv44 := &x.HostAliases + yym45 := z.DecBinary() + _ = yym45 + if false { + } else { + h.decSliceHostAlias((*[]HostAlias)(yyv44), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSpec) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj46 int + var yyb46 bool + var yyhl46 bool = l >= 0 + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv47 := &x.Volumes + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + h.decSliceVolume((*[]Volume)(yyv47), d) + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitContainers = nil + } else { + yyv49 := &x.InitContainers + yym50 := z.DecBinary() + _ = yym50 + if false { + } else { + h.decSliceContainer((*[]Container)(yyv49), d) + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Containers = nil + } else { + yyv51 := &x.Containers + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + h.decSliceContainer((*[]Container)(yyv51), d) + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RestartPolicy = "" + } else { + yyv53 := &x.RestartPolicy + yyv53.CodecDecodeSelf(d) + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TerminationGracePeriodSeconds != nil { + x.TerminationGracePeriodSeconds = nil + } + } else { + if x.TerminationGracePeriodSeconds == nil { + x.TerminationGracePeriodSeconds = new(int64) + } + yym55 := z.DecBinary() + _ = yym55 + if false { + } else { + *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym57 := z.DecBinary() + _ = yym57 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DNSPolicy = "" + } else { + yyv58 := &x.DNSPolicy + yyv58.CodecDecodeSelf(d) + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeSelector = nil + } else { + yyv59 := &x.NodeSelector + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + z.F.DecMapStringStringX(yyv59, false, d) + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceAccountName = "" + } else { + yyv61 := &x.ServiceAccountName + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + *((*string)(yyv61)) = r.DecodeString() + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DeprecatedServiceAccount = "" + } else { + yyv63 := &x.DeprecatedServiceAccount + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*string)(yyv63)) = r.DecodeString() + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeName = "" + } else { + yyv67 := &x.NodeName + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + *((*string)(yyv67)) = r.DecodeString() + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv69 := &x.HostNetwork + yym70 := z.DecBinary() + _ = yym70 + if false { + } else { + *((*bool)(yyv69)) = r.DecodeBool() + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv71 := &x.HostPID + yym72 := z.DecBinary() + _ = yym72 + if false { + } else { + *((*bool)(yyv71)) = r.DecodeBool() + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv73 := &x.HostIPC + yym74 := z.DecBinary() + _ = yym74 + if false { + } else { + *((*bool)(yyv73)) = r.DecodeBool() + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(PodSecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImagePullSecrets = nil + } else { + yyv76 := &x.ImagePullSecrets + yym77 := z.DecBinary() + _ = yym77 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv76), d) + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv78 := &x.Hostname + yym79 := z.DecBinary() + _ = yym79 + if false { + } else { + *((*string)(yyv78)) = r.DecodeString() + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + yyv80 := &x.Subdomain + yym81 := z.DecBinary() + _ = yym81 + if false { + } else { + *((*string)(yyv80)) = r.DecodeString() + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Affinity != nil { + x.Affinity = nil + } + } else { + if x.Affinity == nil { + x.Affinity = new(Affinity) + } + x.Affinity.CodecDecodeSelf(d) + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SchedulerName = "" + } else { + yyv83 := &x.SchedulerName + yym84 := z.DecBinary() + _ = yym84 + if false { + } else { + *((*string)(yyv83)) = r.DecodeString() + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Tolerations = nil + } else { + yyv85 := &x.Tolerations + yym86 := z.DecBinary() + _ = yym86 + if false { + } else { + h.decSliceToleration((*[]Toleration)(yyv85), d) + } + } + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostAliases = nil + } else { + yyv87 := &x.HostAliases + yym88 := z.DecBinary() + _ = yym88 + if false { + } else { + h.decSliceHostAlias((*[]HostAlias)(yyv87), d) + } + } + for { + yyj46++ + if yyhl46 { + yyb46 = yyj46 > l + } else { + yyb46 = r.CheckBreak() + } + if yyb46 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj46-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HostAlias) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.IP != "" + yyq2[1] = len(x.Hostnames) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ip")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Hostnames == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.Hostnames, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostnames")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Hostnames == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.Hostnames, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HostAlias) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HostAlias) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, 
true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ip": + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv4 := &x.IP + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostnames": + if r.TryDecodeAsNil() { + x.Hostnames = nil + } else { + yyv6 := &x.Hostnames + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HostAlias) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv9 := &x.IP + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostnames = nil + } else { + yyv11 := &x.Hostnames + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + z.F.DecSliceStringX(yyv11, false, d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.SELinuxOptions != nil + yyq2[1] = x.RunAsUser != nil + yyq2[2] = x.RunAsNonRoot != nil + yyq2[3] = len(x.SupplementalGroups) != 0 + yyq2[4] = x.FSGroup != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy7 := *x.RunAsUser + yym8 := z.EncBinary() + _ = yym8 + if 
false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy9 := *x.RunAsUser + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy12 := *x.RunAsNonRoot + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy14 := *x.RunAsNonRoot + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeBool(bool(yy14)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.SupplementalGroups == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSlicetypes_UnixGroupID(([]pkg1_types.UnixGroupID)(x.SupplementalGroups), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SupplementalGroups == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSlicetypes_UnixGroupID(([]pkg1_types.UnixGroupID)(x.SupplementalGroups), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.FSGroup == nil { + r.EncodeNil() + } else { + yy20 := *x.FSGroup + yym21 := z.EncBinary() + _ = yym21 + if false { + } else if z.HasExtensions() && z.EncExt(yy20) { + } else { + r.EncodeInt(int64(yy20)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FSGroup == nil { + r.EncodeNil() + } else { + yy22 := *x.FSGroup + yym23 := z.EncBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.EncExt(yy22) { + } else { + r.EncodeInt(int64(yy22)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 
:= r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "seLinuxOptions": + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + case "runAsUser": + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(pkg1_types.UnixUserID) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else if z.HasExtensions() && z.DecExt(x.RunAsUser) { + } else { + *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) + } + } + case "runAsNonRoot": + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + case "supplementalGroups": + if r.TryDecodeAsNil() { + x.SupplementalGroups = nil + } else { + yyv9 := &x.SupplementalGroups + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicetypes_UnixGroupID((*[]pkg1_types.UnixGroupID)(yyv9), d) + } + } + case "fsGroup": + if r.TryDecodeAsNil() { + if x.FSGroup != nil { + x.FSGroup = nil + } + } else { + if x.FSGroup == nil { + x.FSGroup = new(pkg1_types.UnixGroupID) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.FSGroup) { + } else { + *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = 
new(pkg1_types.UnixUserID) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(x.RunAsUser) { + } else { + *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SupplementalGroups = nil + } else { + yyv19 := &x.SupplementalGroups + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicetypes_UnixGroupID((*[]pkg1_types.UnixGroupID)(yyv19), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FSGroup != nil { + x.FSGroup = nil + } + } else { + if x.FSGroup == nil { + x.FSGroup = new(pkg1_types.UnixGroupID) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.DecExt(x.FSGroup) { + } else { + *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PodQOSClass) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PodQOSClass) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = len(x.Conditions) != 0 + yyq2[2] = x.Message != "" + yyq2[3] = x.Reason != "" + yyq2[4] = x.HostIP != "" + yyq2[5] = x.PodIP != "" + yyq2[6] = x.StartTime != nil + yyq2[7] = len(x.InitContainerStatuses) != 0 + yyq2[8] = len(x.ContainerStatuses) != 0 + yyq2[9] = x.QOSClass != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + 
r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + 
if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.StartTime == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym22 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartTime == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym23 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym23 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.InitContainerStatuses == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.InitContainerStatuses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initContainerStatuses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.InitContainerStatuses == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.InitContainerStatuses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ContainerStatuses == nil { + r.EncodeNil() + } else { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerStatuses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ContainerStatuses == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + x.QOSClass.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("qosClass")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.QOSClass.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv5 := &x.Conditions + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePodCondition((*[]PodCondition)(yyv5), d) + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv7 := &x.Message + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv9 := &x.Reason + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "hostIP": + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv11 := &x.HostIP + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "podIP": + if r.TryDecodeAsNil() { + x.PodIP = "" + } else { + yyv13 := &x.PodIP + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "startTime": + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg2_v1.Time) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym16 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + case "initContainerStatuses": + if r.TryDecodeAsNil() { + x.InitContainerStatuses = nil + } else { + yyv17 := &x.InitContainerStatuses + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv17), d) + } + } + case "containerStatuses": + if r.TryDecodeAsNil() { + x.ContainerStatuses = nil + } else { + yyv19 := &x.ContainerStatuses + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv19), d) + } + } + case "qosClass": + if r.TryDecodeAsNil() { + x.QOSClass = "" + } else { + yyv21 := &x.QOSClass + yyv21.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { 
+ var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj22 int + var yyb22 bool + var yyhl22 bool = l >= 0 + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv23 := &x.Phase + yyv23.CodecDecodeSelf(d) + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv24 := &x.Conditions + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + h.decSlicePodCondition((*[]PodCondition)(yyv24), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv26 := &x.Message + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv28 := &x.Reason + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv30 := &x.HostIP + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodIP = "" + } else { + yyv32 := &x.PodIP + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg2_v1.Time) + } + yym35 := z.DecBinary() + _ = yym35 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym35 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym35 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitContainerStatuses = nil 
+ } else { + yyv36 := &x.InitContainerStatuses + yym37 := z.DecBinary() + _ = yym37 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv36), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerStatuses = nil + } else { + yyv38 := &x.ContainerStatuses + yym39 := z.DecBinary() + _ = yym39 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv38), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.QOSClass = "" + } else { + yyv40 := &x.QOSClass + yyv40.CodecDecodeSelf(d) + } + for { + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj22-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Status + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Status + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodStatusResult) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv10 := &x.Status + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + 
} else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv18 := &x.Status + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] 
{ + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Pod) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Pod) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = 
r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = 
h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePod(([]Pod)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePod(([]Pod)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := 
r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePod((*[]Pod)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + 
z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePod((*[]Pod)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv6 := &x.Spec + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := 
z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Template + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Template + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodTemplate) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
*((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = PodTemplateSpec{} + } else { + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = PodTemplateSpec{} + } else { + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if 
yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodTemplateList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { 
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePodTemplate((*[]PodTemplate)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := 
z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePodTemplate((*[]PodTemplate)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.MinReadySeconds != 0 + yyq2[2] = len(x.Selector) != 0 + yyq2[3] = x.Template != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Template == nil { + r.EncodeNil() + } else { + x.Template.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Template == nil { + 
r.EncodeNil() + } else { + x.Template.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv6 := &x.MinReadySeconds + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv8 := &x.Selector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecMapStringStringX(yyv8, false, d) + } + } + case "template": + if r.TryDecodeAsNil() { + if x.Template != nil { + x.Template = nil + } + } else { + if x.Template == nil { + x.Template = new(PodTemplateSpec) + } + x.Template.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv14 := &x.MinReadySeconds + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv16 := &x.Selector + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecMapStringStringX(yyv16, false, d) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Template != nil { + x.Template = nil + } + } else { + if x.Template == nil { + x.Template = new(PodTemplateSpec) + } + x.Template.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FullyLabeledReplicas != 0 + yyq2[2] = x.ReadyReplicas != 0 + yyq2[3] = x.AvailableReplicas != 0 + yyq2[4] = x.ObservedGeneration != 0 + yyq2[5] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.FullyLabeledReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.FullyLabeledReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } 
else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if 
yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "fullyLabeledReplicas": + if r.TryDecodeAsNil() { + x.FullyLabeledReplicas = 0 + } else { + yyv6 := &x.FullyLabeledReplicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "readyReplicas": + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv8 := &x.ReadyReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "availableReplicas": + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv10 := &x.AvailableReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv12 := &x.ObservedGeneration + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int64)(yyv12)) = int64(r.DecodeInt(64)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv14 := &x.Conditions + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv14), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv17 := &x.Replicas + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FullyLabeledReplicas = 0 + } else { + yyv19 := &x.FullyLabeledReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv21 := &x.ReadyReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv23 := &x.AvailableReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv25 := &x.ObservedGeneration + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int64)(yyv25)) = int64(r.DecodeInt(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv27 := &x.Conditions + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv27), d) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ReplicationControllerConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ReplicationControllerConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ReplicationControllerCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = x.Reason != "" + yyq2[4] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastTransitionTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastTransitionTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= 
l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv6 := &x.LastTransitionTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv13 := &x.Type + yyv13.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv14 := &x.Status + yyv14.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv15 := &x.LastTransitionTime + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv17 := &x.Reason + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv19 := &x.Message + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationController) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ReplicationControllerSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ReplicationControllerStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + 
if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ReplicationControllerSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ReplicationControllerStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceReplicationController(([]ReplicationController)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceReplicationController(([]ReplicationController)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case 
"kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceReplicationController((*[]ReplicationController)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceReplicationController((*[]ReplicationController)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ServiceAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ServiceAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x ServiceType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ServiceType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x ServiceExternalTrafficPolicyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ServiceExternalTrafficPolicyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ServiceStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.LoadBalancer + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.LoadBalancer + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "loadBalancer": + if r.TryDecodeAsNil() { + x.LoadBalancer = LoadBalancerStatus{} + } else { + yyv4 := &x.LoadBalancer + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancer = LoadBalancerStatus{} + } else { + yyv6 := &x.LoadBalancer + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ingress) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Ingress == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ingress")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ingress == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x 
*LoadBalancerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LoadBalancerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ingress": + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv4 := &x.Ingress + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LoadBalancerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv7 := &x.Ingress + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.IP != "" + yyq2[1] = x.Hostname != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { 
+ r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ip")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LoadBalancerIngress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LoadBalancerIngress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ip": + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv4 := &x.IP + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv6 := &x.Hostname + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } 
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv9 := &x.IP + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv11 := &x.Hostname + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [11]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ports) != 0 + yyq2[1] = len(x.Selector) != 0 + yyq2[2] = x.ClusterIP != "" + yyq2[3] = x.Type != "" + yyq2[4] = len(x.ExternalIPs) != 0 + yyq2[5] = x.SessionAffinity != "" + yyq2[6] = x.LoadBalancerIP != "" + yyq2[7] = len(x.LoadBalancerSourceRanges) != 0 + yyq2[8] = x.ExternalName != "" + yyq2[9] = x.ExternalTrafficPolicy != "" + yyq2[10] = x.HealthCheckNodePort != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(11) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceServicePort(([]ServicePort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceServicePort(([]ServicePort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("clusterIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ExternalIPs == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.ExternalIPs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("externalIPs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ExternalIPs == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.ExternalIPs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + x.SessionAffinity.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sessionAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.SessionAffinity.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancerIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.LoadBalancerSourceRanges == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LoadBalancerSourceRanges == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.ExternalName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("externalName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + x.ExternalTrafficPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("externalTrafficPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.ExternalTrafficPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + r.EncodeInt(int64(x.HealthCheckNodePort)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("healthCheckNodePort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym35 := z.EncBinary() + _ = yym35 + if false { + } else { + r.EncodeInt(int64(x.HealthCheckNodePort)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv4 := &x.Ports + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceServicePort((*[]ServicePort)(yyv4), d) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecMapStringStringX(yyv6, false, d) + } + } + case "clusterIP": + if r.TryDecodeAsNil() { + x.ClusterIP = "" + } else { 
+ yyv8 := &x.ClusterIP + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv10 := &x.Type + yyv10.CodecDecodeSelf(d) + } + case "externalIPs": + if r.TryDecodeAsNil() { + x.ExternalIPs = nil + } else { + yyv11 := &x.ExternalIPs + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + z.F.DecSliceStringX(yyv11, false, d) + } + } + case "sessionAffinity": + if r.TryDecodeAsNil() { + x.SessionAffinity = "" + } else { + yyv13 := &x.SessionAffinity + yyv13.CodecDecodeSelf(d) + } + case "loadBalancerIP": + if r.TryDecodeAsNil() { + x.LoadBalancerIP = "" + } else { + yyv14 := &x.LoadBalancerIP + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "loadBalancerSourceRanges": + if r.TryDecodeAsNil() { + x.LoadBalancerSourceRanges = nil + } else { + yyv16 := &x.LoadBalancerSourceRanges + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecSliceStringX(yyv16, false, d) + } + } + case "externalName": + if r.TryDecodeAsNil() { + x.ExternalName = "" + } else { + yyv18 := &x.ExternalName + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + case "externalTrafficPolicy": + if r.TryDecodeAsNil() { + x.ExternalTrafficPolicy = "" + } else { + yyv20 := &x.ExternalTrafficPolicy + yyv20.CodecDecodeSelf(d) + } + case "healthCheckNodePort": + if r.TryDecodeAsNil() { + x.HealthCheckNodePort = 0 + } else { + yyv21 := &x.HealthCheckNodePort + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv24 := &x.Ports + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + h.decSliceServicePort((*[]ServicePort)(yyv24), d) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv26 := &x.Selector + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + z.F.DecMapStringStringX(yyv26, false, d) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClusterIP = "" + } else { + yyv28 := &x.ClusterIP + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv30 := &x.Type + yyv30.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExternalIPs = nil + } else { + yyv31 := &x.ExternalIPs + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + z.F.DecSliceStringX(yyv31, false, d) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SessionAffinity = "" + } else { + yyv33 := &x.SessionAffinity + yyv33.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancerIP = "" + } else { + yyv34 := &x.LoadBalancerIP + yym35 := z.DecBinary() + _ = yym35 + if false { + } else { + *((*string)(yyv34)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancerSourceRanges = nil + } else { + yyv36 := &x.LoadBalancerSourceRanges + yym37 := z.DecBinary() + _ = yym37 + if false { + } else { + z.F.DecSliceStringX(yyv36, false, d) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExternalName = "" + } else { + yyv38 := &x.ExternalName + yym39 := z.DecBinary() + _ = yym39 + if false { + } else { + *((*string)(yyv38)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExternalTrafficPolicy = "" + } else { + yyv40 := &x.ExternalTrafficPolicy + yyv40.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HealthCheckNodePort = 0 + } else { + yyv41 := &x.HealthCheckNodePort + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*int32)(yyv41)) = int32(r.DecodeInt(32)) + } + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj23-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + 
yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Protocol != "" + yyq2[3] = true + yyq2[4] = x.NodePort != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + x.Protocol.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Protocol.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy13 := &x.TargetPort + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) + } else { + z.EncFallback(yy13) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetPort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy15 := &x.TargetPort + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeInt(int64(x.NodePort)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodePort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.NodePort)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } 
+} + +func (x *ServicePort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "protocol": + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv6 := &x.Protocol + yyv6.CodecDecodeSelf(d) + } + case "port": + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "targetPort": + if r.TryDecodeAsNil() { + x.TargetPort = pkg4_intstr.IntOrString{} + } else { + yyv9 := &x.TargetPort + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + case "nodePort": + if r.TryDecodeAsNil() { + x.NodePort = 0 + } else { + yyv11 := &x.NodePort + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv14 := &x.Name + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv16 := &x.Protocol + 
yyv16.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv17 := &x.Port + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetPort = pkg4_intstr.IntOrString{} + } else { + yyv19 := &x.TargetPort + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodePort = 0 + } else { + yyv21 := &x.NodePort + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Service) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Service) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
*((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ServiceSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ServiceStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ServiceSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ServiceStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + 
yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceService(([]Service)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceService(([]Service)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } 
+ } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceService((*[]Service)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceService((*[]Service)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Secrets) != 0 + yyq2[4] = len(x.ImagePullSecrets) != 0 + yyq2[5] = x.AutomountServiceAccountToken != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Secrets == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + 
h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secrets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Secrets == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy21 := *x.AutomountServiceAccountToken + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(yy21)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("automountServiceAccountToken")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy23 := *x.AutomountServiceAccountToken + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeBool(bool(yy23)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceAccount) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceAccount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + 
z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "secrets": + if r.TryDecodeAsNil() { + x.Secrets = nil + } else { + yyv10 := &x.Secrets + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceObjectReference((*[]ObjectReference)(yyv10), d) + } + } + case "imagePullSecrets": + if r.TryDecodeAsNil() { + x.ImagePullSecrets = nil + } else { + yyv12 := &x.ImagePullSecrets + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv12), d) + } + } + case "automountServiceAccountToken": + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv17 := &x.Kind + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv19 := &x.APIVersion + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv21 := &x.ObjectMeta + yym22 := z.DecBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.DecExt(yyv21) { + } else { + z.DecFallback(yyv21, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Secrets = nil + } else { + yyv23 := &x.Secrets + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceObjectReference((*[]ObjectReference)(yyv23), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImagePullSecrets = nil + } else { + yyv25 := &x.ImagePullSecrets + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv25), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceAccountList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + 
yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceServiceAccount((*[]ServiceAccount)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceServiceAccount((*[]ServiceAccount)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else 
{ + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subsets == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subsets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subsets == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Endpoints) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if 
r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subsets": + if r.TryDecodeAsNil() { + x.Subsets = nil + } else { + yyv10 := &x.Subsets + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceEndpointSubset((*[]EndpointSubset)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subsets = nil + } else { + yyv19 := &x.Subsets + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceEndpointSubset((*[]EndpointSubset)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + 
+func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Addresses) != 0 + yyq2[1] = len(x.NotReadyAddresses) != 0 + yyq2[2] = len(x.Ports) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Addresses == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("addresses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Addresses == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NotReadyAddresses == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("notReadyAddresses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NotReadyAddresses == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointSubset) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + 
} else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "addresses": + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv4 := &x.Addresses + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv4), d) + } + } + case "notReadyAddresses": + if r.TryDecodeAsNil() { + x.NotReadyAddresses = nil + } else { + yyv6 := &x.NotReadyAddresses + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv6), d) + } + } + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv8 := &x.Ports + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSliceEndpointPort((*[]EndpointPort)(yyv8), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv11 := &x.Addresses + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv11), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NotReadyAddresses = nil + } else { + yyv13 := &x.NotReadyAddresses + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv15 := &x.Ports + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceEndpointPort((*[]EndpointPort)(yyv15), d) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Hostname != "" + yyq2[2] = x.NodeName != nil + yyq2[3] = x.TargetRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ip")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.NodeName == nil { + r.EncodeNil() + } else { + yy10 := *x.NodeName + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeName == nil { + r.EncodeNil() + } else { + yy12 := *x.NodeName + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.TargetRef == nil { + r.EncodeNil() + } else { + x.TargetRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetRef == nil { + r.EncodeNil() + } else { + x.TargetRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointAddress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } 
else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ip": + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv4 := &x.IP + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv6 := &x.Hostname + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "nodeName": + if r.TryDecodeAsNil() { + if x.NodeName != nil { + x.NodeName = nil + } + } else { + if x.NodeName == nil { + x.NodeName = new(string) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(x.NodeName)) = r.DecodeString() + } + } + case "targetRef": + if r.TryDecodeAsNil() { + if x.TargetRef != nil { + x.TargetRef = nil + } + } else { + if x.TargetRef == nil { + x.TargetRef = new(ObjectReference) + } + x.TargetRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv12 := &x.IP + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv14 := &x.Hostname + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NodeName != nil { + 
x.NodeName = nil + } + } else { + if x.NodeName == nil { + x.NodeName = new(string) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(x.NodeName)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetRef != nil { + x.TargetRef = nil + } + } else { + if x.TargetRef == nil { + x.TargetRef = new(ObjectReference) + } + x.TargetRef.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Protocol != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + x.Protocol.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Protocol.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointPort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "port": + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv6 := &x.Port + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "protocol": + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv8 := &x.Protocol + yyv8.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv12 := &x.Port + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv14 := &x.Protocol + yyv14.CodecDecodeSelf(d) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceEndpoints(([]Endpoints)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceEndpoints(([]Endpoints)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointsList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceEndpoints((*[]Endpoints)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 
+ if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceEndpoints((*[]Endpoints)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodCIDR != "" + yyq2[1] = x.ExternalID != "" + yyq2[2] = x.ProviderID != "" + yyq2[3] = x.Unschedulable != false + yyq2[4] = len(x.Taints) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("externalID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("providerID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Unschedulable)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("unschedulable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Unschedulable)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Taints == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceTaint(([]Taint)(x.Taints), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("taints")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Taints == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceTaint(([]Taint)(x.Taints), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podCIDR": + if r.TryDecodeAsNil() { + x.PodCIDR = "" + } else { + yyv4 := &x.PodCIDR + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "externalID": + if r.TryDecodeAsNil() { + x.ExternalID = "" + } else { + yyv6 := &x.ExternalID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "providerID": + if r.TryDecodeAsNil() { + x.ProviderID = "" + } else { + yyv8 := &x.ProviderID + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "unschedulable": + if r.TryDecodeAsNil() { + x.Unschedulable = false + } else { + yyv10 := &x.Unschedulable + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = 
r.DecodeBool() + } + } + case "taints": + if r.TryDecodeAsNil() { + x.Taints = nil + } else { + yyv12 := &x.Taints + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceTaint((*[]Taint)(yyv12), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodCIDR = "" + } else { + yyv15 := &x.PodCIDR + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExternalID = "" + } else { + yyv17 := &x.ExternalID + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProviderID = "" + } else { + yyv19 := &x.ProviderID + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Unschedulable = false + } else { + yyv21 := &x.Unschedulable + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Taints = nil + } else { + yyv23 := &x.Taints + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceTaint((*[]Taint)(yyv23), d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonEndpoint) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := 
range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonEndpoint) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "Port": + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv4 := &x.Port + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeDaemonEndpoints) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + 
yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.KubeletEndpoint + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kubeletEndpoint")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.KubeletEndpoint + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeDaemonEndpoints) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kubeletEndpoint": + if r.TryDecodeAsNil() { + x.KubeletEndpoint = DaemonEndpoint{} + } else { + yyv4 := &x.KubeletEndpoint + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeDaemonEndpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KubeletEndpoint = DaemonEndpoint{} + } else { + yyv6 := &x.KubeletEndpoint + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 10 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("machineID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("systemUUID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("bootID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kernelVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("osImage")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerRuntimeVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kubeletVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kubeProxyVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operatingSystem")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("architecture")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSystemInfo) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "machineID": + if r.TryDecodeAsNil() { + x.MachineID = "" + } else { + yyv4 := &x.MachineID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "systemUUID": + if r.TryDecodeAsNil() { + x.SystemUUID = "" + } else { + yyv6 := &x.SystemUUID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "bootID": + if r.TryDecodeAsNil() { + x.BootID = "" + } else { + yyv8 := &x.BootID + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "kernelVersion": + if r.TryDecodeAsNil() { + x.KernelVersion = "" + } else { + yyv10 := &x.KernelVersion + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "osImage": + if r.TryDecodeAsNil() { + x.OSImage = "" + } else { + yyv12 := &x.OSImage + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "containerRuntimeVersion": + if r.TryDecodeAsNil() { + x.ContainerRuntimeVersion = "" + } else { + yyv14 := &x.ContainerRuntimeVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "kubeletVersion": + if r.TryDecodeAsNil() { + x.KubeletVersion = "" + } else { + yyv16 := &x.KubeletVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + case "kubeProxyVersion": + if r.TryDecodeAsNil() { + x.KubeProxyVersion = "" + } else { + yyv18 := &x.KubeProxyVersion + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + case "operatingSystem": + if r.TryDecodeAsNil() { + x.OperatingSystem = "" + } else { + yyv20 := &x.OperatingSystem + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "architecture": + if r.TryDecodeAsNil() { + x.Architecture = "" + } else { + yyv22 := &x.Architecture + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MachineID = "" + } else { + yyv25 := &x.MachineID 
+ yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SystemUUID = "" + } else { + yyv27 := &x.SystemUUID + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.BootID = "" + } else { + yyv29 := &x.BootID + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KernelVersion = "" + } else { + yyv31 := &x.KernelVersion + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OSImage = "" + } else { + yyv33 := &x.OSImage + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerRuntimeVersion = "" + } else { + yyv35 := &x.ContainerRuntimeVersion + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KubeletVersion = "" + } else { + yyv37 := &x.KubeletVersion + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KubeProxyVersion = "" + } else { + yyv39 := &x.KubeProxyVersion + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OperatingSystem = "" + } else { + yyv41 := &x.OperatingSystem + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } 
+ } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Architecture = "" + } else { + yyv43 := &x.Architecture + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Capacity) != 0 + yyq2[1] = len(x.Allocatable) != 0 + yyq2[2] = x.Phase != "" + yyq2[3] = len(x.Conditions) != 0 + yyq2[4] = len(x.Addresses) != 0 + yyq2[5] = true + yyq2[6] = true + yyq2[7] = len(x.Images) != 0 + yyq2[8] = len(x.VolumesInUse) != 0 + yyq2[9] = len(x.VolumesAttached) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Allocatable == nil { + r.EncodeNil() + } else { + x.Allocatable.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allocatable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Allocatable == nil { + r.EncodeNil() + } else { + x.Allocatable.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Addresses == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("addresses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Addresses == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yy19 := &x.DaemonEndpoints + yy19.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("daemonEndpoints")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy21 := &x.DaemonEndpoints + yy21.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yy24 := &x.NodeInfo + yy24.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeInfo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy26 := &x.NodeInfo + yy26.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.Images == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("images")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Images == nil { + r.EncodeNil() + } else { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.VolumesInUse == nil { + r.EncodeNil() + } else { + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumesInUse")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumesInUse == nil { + r.EncodeNil() + } else { + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.VolumesAttached == nil { + r.EncodeNil() + } else { + yym35 := z.EncBinary() + _ = 
yym35 + if false { + } else { + h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumesAttached")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumesAttached == nil { + r.EncodeNil() + } else { + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "capacity": + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv4 := &x.Capacity + yyv4.CodecDecodeSelf(d) + } + case "allocatable": + if r.TryDecodeAsNil() { + x.Allocatable = nil + } else { + yyv5 := &x.Allocatable + yyv5.CodecDecodeSelf(d) + } + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv6 := &x.Phase + yyv6.CodecDecodeSelf(d) + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv7 := &x.Conditions + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceNodeCondition((*[]NodeCondition)(yyv7), d) + } + } + case "addresses": + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv9 := &x.Addresses + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceNodeAddress((*[]NodeAddress)(yyv9), d) + } + } + case "daemonEndpoints": + if r.TryDecodeAsNil() { + x.DaemonEndpoints = NodeDaemonEndpoints{} + } else { + yyv11 := &x.DaemonEndpoints + yyv11.CodecDecodeSelf(d) + } + case "nodeInfo": + if r.TryDecodeAsNil() { + x.NodeInfo = NodeSystemInfo{} + } else { + yyv12 := &x.NodeInfo + yyv12.CodecDecodeSelf(d) + } + case "images": + if r.TryDecodeAsNil() { + x.Images = nil + } else { + yyv13 := &x.Images + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceContainerImage((*[]ContainerImage)(yyv13), d) + } + } + case "volumesInUse": + if r.TryDecodeAsNil() { + x.VolumesInUse = nil + } else { + yyv15 := 
&x.VolumesInUse + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d) + } + } + case "volumesAttached": + if r.TryDecodeAsNil() { + x.VolumesAttached = nil + } else { + yyv17 := &x.VolumesAttached + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv20 := &x.Capacity + yyv20.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Allocatable = nil + } else { + yyv21 := &x.Allocatable + yyv21.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv22 := &x.Phase + yyv22.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv23 := &x.Conditions + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv25 := &x.Addresses + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DaemonEndpoints = NodeDaemonEndpoints{} + } else { + yyv27 := &x.DaemonEndpoints + yyv27.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeInfo = NodeSystemInfo{} + } else { + yyv28 := &x.NodeInfo + yyv28.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Images = nil + } else { + yyv29 := &x.Images + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + h.decSliceContainerImage((*[]ContainerImage)(yyv29), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumesInUse = nil + } else { + yyv31 := &x.VolumesInUse + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumesAttached = nil + } else { + yyv33 := &x.VolumesAttached + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d) + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj19-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Name.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Name.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("devicePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + 
if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "devicePath": + if r.TryDecodeAsNil() { + x.DevicePath = "" + } else { + yyv5 := &x.DevicePath + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DevicePath = "" + } else { + yyv9 := &x.DevicePath + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *AvoidPods) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { 
+ yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.PreferAvoidPods) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PreferAvoidPods == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferAvoidPods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferAvoidPods == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AvoidPods) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AvoidPods) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "preferAvoidPods": + if r.TryDecodeAsNil() { + x.PreferAvoidPods = nil + } else { + yyv4 := &x.PreferAvoidPods + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AvoidPods) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferAvoidPods = nil + } else { + yyv7 := &x.PreferAvoidPods + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PreferAvoidPodsEntry) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = true + yyq2[2] = x.Reason != "" + yyq2[3] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.PodSignature + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podSignature")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.PodSignature + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.EvictionTime + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if yym10 { + z.EncBinaryMarshal(yy9) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evictionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.EvictionTime + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(yy11) { + } else if yym12 { + z.EncBinaryMarshal(yy11) + } else if !yym12 && z.IsJSONHandle() { + z.EncJSONMarshal(yy11) + } else { + z.EncFallback(yy11) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if 
yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PreferAvoidPodsEntry) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PreferAvoidPodsEntry) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podSignature": + if r.TryDecodeAsNil() { + x.PodSignature = PodSignature{} + } else { + yyv4 := &x.PodSignature + yyv4.CodecDecodeSelf(d) + } + case "evictionTime": + if r.TryDecodeAsNil() { + x.EvictionTime = pkg2_v1.Time{} + } else { + yyv5 := &x.EvictionTime + yym6 := z.DecBinary() + _ = yym6 + if false { + } else if z.HasExtensions() && z.DecExt(yyv5) { + } else if yym6 { + z.DecBinaryUnmarshal(yyv5) + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv5) + } else { + z.DecFallback(yyv5, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv7 := &x.Reason + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv9 := &x.Message + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PreferAvoidPodsEntry) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodSignature = PodSignature{} + } else { + yyv12 := &x.PodSignature + 
yyv12.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvictionTime = pkg2_v1.Time{} + } else { + yyv13 := &x.EvictionTime + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if yym14 { + z.DecBinaryUnmarshal(yyv13) + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv15 := &x.Reason + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv17 := &x.Message + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodSignature) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodController != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PodController == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodController) { + } else { + z.EncFallback(x.PodController) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podController")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodController == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodController) { + } else { + z.EncFallback(x.PodController) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSignature) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = 
yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSignature) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podController": + if r.TryDecodeAsNil() { + if x.PodController != nil { + x.PodController = nil + } + } else { + if x.PodController == nil { + x.PodController = new(pkg2_v1.OwnerReference) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodController) { + } else { + z.DecFallback(x.PodController, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSignature) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodController != nil { + x.PodController = nil + } + } else { + if x.PodController == nil { + x.PodController = new(pkg2_v1.OwnerReference) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodController) { + } else { + z.DecFallback(x.PodController, false) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.SizeBytes != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Names 
== nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Names, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("names")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Names == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Names, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.SizeBytes)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sizeBytes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.SizeBytes)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerImage) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerImage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "names": + if r.TryDecodeAsNil() { + x.Names = nil + } else { + yyv4 := &x.Names + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "sizeBytes": + if r.TryDecodeAsNil() { + x.SizeBytes = 0 + } else { + yyv6 := &x.SizeBytes + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int64)(yyv6)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Names = nil + } else { + yyv9 := &x.Names + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + z.F.DecSliceStringX(yyv9, false, d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SizeBytes = 0 + } else { + yyv11 := &x.SizeBytes + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(yyv11)) = int64(r.DecodeInt(64)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NodePhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodePhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x NodeConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodeConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastHeartbeatTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastHeartbeatTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastHeartbeatTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastHeartbeatTime": + if r.TryDecodeAsNil() { + x.LastHeartbeatTime = pkg2_v1.Time{} + } else { + yyv6 := &x.LastHeartbeatTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := 
&x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastHeartbeatTime = pkg2_v1.Time{} + } else { + yyv17 := &x.LastHeartbeatTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NodeAddressType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodeAddressType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := 
range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Address)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("address")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Address)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeAddress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "address": + if r.TryDecodeAsNil() { + x.Address = "" + } else { + yyv5 := &x.Address + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv8 := &x.Type + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = 
r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Address = "" + } else { + yyv9 := &x.Address + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ResourceName) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ResourceName) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x ResourceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encResourceList((ResourceList)(x), e) + } + } +} + +func (x *ResourceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decResourceList((*ResourceList)(x), d) + } +} + +func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() 
+ } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = NodeSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = NodeStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = NodeSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = NodeStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + 
if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceNode(([]Node)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNode(([]Node)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := 
r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceNode((*[]Node)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + 
z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceNode((*[]Node)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x FinalizerName) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *FinalizerName) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Finalizers) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finalizers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NamespaceSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else 
{ + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "finalizers": + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv4 := &x.Finalizers + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceFinalizerName((*[]FinalizerName)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv7 := &x.Finalizers + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceFinalizerName((*[]FinalizerName)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NamespaceStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NamespaceStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { 
+ yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv6 := &x.Phase + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NamespacePhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NamespacePhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + 
r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Namespace) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = NamespaceSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = NamespaceStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = NamespaceSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = NamespaceStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceNamespace(([]Namespace)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNamespace(([]Namespace)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NamespaceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NamespaceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceNamespace((*[]Namespace)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceNamespace((*[]Namespace)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Target + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Target + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Binding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Binding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "target": + if r.TryDecodeAsNil() { + x.Target = 
ObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = ObjectReference{} + } else { + yyv18 := &x.Target + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.UID != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.UID == nil { + r.EncodeNil() + } else { + yy4 := *x.UID + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.UID == nil { + r.EncodeNil() + } else { + yy6 := *x.UID + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "uid": + if r.TryDecodeAsNil() { + if x.UID != nil { + x.UID = nil + } + } else { + if x.UID == nil { + x.UID = new(pkg1_types.UID) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.UID) { + } else { + *((*string)(x.UID)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.UID != nil { + x.UID = nil + } + } else { + if x.UID == nil { + x.UID = new(pkg1_types.UID) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.UID) { + } else { + *((*string)(x.UID)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeletionPropagation) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() 
+ _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeletionPropagation) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.GracePeriodSeconds != nil + yyq2[3] = x.Preconditions != nil + yyq2[4] = x.OrphanDependents != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.GracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy10 := *x.GracePeriodSeconds + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy12 := *x.GracePeriodSeconds + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Preconditions == nil { + r.EncodeNil() + } else { + x.Preconditions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("preconditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Preconditions == nil { + r.EncodeNil() + } else { + x.Preconditions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.OrphanDependents == nil { + r.EncodeNil() + } else { + yy18 := *x.OrphanDependents + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(yy18)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("orphanDependents")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.OrphanDependents == nil { + r.EncodeNil() + } else { + yy20 := *x.OrphanDependents + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeBool(bool(yy20)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.PropagationPolicy == nil { + r.EncodeNil() + } else { + yy23 := *x.PropagationPolicy + yy23.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("PropagationPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PropagationPolicy == nil { + r.EncodeNil() + } else { + yy25 := *x.PropagationPolicy + yy25.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "gracePeriodSeconds": + if r.TryDecodeAsNil() { + if x.GracePeriodSeconds != nil { + x.GracePeriodSeconds = nil + 
} + } else { + if x.GracePeriodSeconds == nil { + x.GracePeriodSeconds = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + case "preconditions": + if r.TryDecodeAsNil() { + if x.Preconditions != nil { + x.Preconditions = nil + } + } else { + if x.Preconditions == nil { + x.Preconditions = new(Preconditions) + } + x.Preconditions.CodecDecodeSelf(d) + } + case "orphanDependents": + if r.TryDecodeAsNil() { + if x.OrphanDependents != nil { + x.OrphanDependents = nil + } + } else { + if x.OrphanDependents == nil { + x.OrphanDependents = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.OrphanDependents)) = r.DecodeBool() + } + } + case "PropagationPolicy": + if r.TryDecodeAsNil() { + if x.PropagationPolicy != nil { + x.PropagationPolicy = nil + } + } else { + if x.PropagationPolicy == nil { + x.PropagationPolicy = new(DeletionPropagation) + } + x.PropagationPolicy.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GracePeriodSeconds != nil { + x.GracePeriodSeconds = nil + } + } else { + if x.GracePeriodSeconds == nil { + x.GracePeriodSeconds = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Preconditions != nil { + x.Preconditions = nil + } + } else { + if x.Preconditions == nil { + x.Preconditions = new(Preconditions) + } + x.Preconditions.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.OrphanDependents != nil { + x.OrphanDependents = nil + } 
+ } else { + if x.OrphanDependents == nil { + x.OrphanDependents = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.OrphanDependents)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PropagationPolicy != nil { + x.PropagationPolicy = nil + } + } else { + if x.PropagationPolicy == nil { + x.PropagationPolicy = new(DeletionPropagation) + } + x.PropagationPolicy.CodecDecodeSelf(d) + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.LabelSelector != "" + yyq2[3] = x.FieldSelector != "" + yyq2[4] = x.IncludeUninitialized != false + yyq2[5] = x.Watch != false + yyq2[6] = x.ResourceVersion != "" + yyq2[7] = x.TimeoutSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.IncludeUninitialized)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("includeUninitialized")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.IncludeUninitialized)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.Watch)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("watch")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.Watch)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.TimeoutSeconds == nil { + r.EncodeNil() + } else { + yy25 := *x.TimeoutSeconds + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TimeoutSeconds == nil { + r.EncodeNil() + } else { + yy27 := *x.TimeoutSeconds + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var 
h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "labelSelector": + if r.TryDecodeAsNil() { + x.LabelSelector = "" + } else { + yyv8 := &x.LabelSelector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "fieldSelector": + if r.TryDecodeAsNil() { + x.FieldSelector = "" + } else { + yyv10 := &x.FieldSelector + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "includeUninitialized": + if r.TryDecodeAsNil() { + x.IncludeUninitialized = false + } else { + yyv12 := &x.IncludeUninitialized + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "watch": + if r.TryDecodeAsNil() { + x.Watch = false + } else { + yyv14 := &x.Watch + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "resourceVersion": + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv16 := &x.ResourceVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + case "timeoutSeconds": + if r.TryDecodeAsNil() { + if x.TimeoutSeconds != nil { + x.TimeoutSeconds = nil + } + } else { + if x.TimeoutSeconds == nil { + x.TimeoutSeconds = new(int64) + } + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else 
{ + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv21 := &x.Kind + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv23 := &x.APIVersion + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LabelSelector = "" + } else { + yyv25 := &x.LabelSelector + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FieldSelector = "" + } else { + yyv27 := &x.FieldSelector + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IncludeUninitialized = false + } else { + yyv29 := &x.IncludeUninitialized + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Watch = false + } else { + yyv31 := &x.Watch + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv33 := &x.ResourceVersion + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TimeoutSeconds != nil { + x.TimeoutSeconds = nil + } + } else { + if x.TimeoutSeconds == nil { + x.TimeoutSeconds = new(int64) + } + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } 
+ if yyb20 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj20-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Container != "" + yyq2[3] = x.Follow != false + yyq2[4] = x.Previous != false + yyq2[5] = x.SinceSeconds != nil + yyq2[6] = x.SinceTime != nil + yyq2[7] = x.Timestamps != false + yyq2[8] = x.TailLines != nil + yyq2[9] = x.LimitBytes != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("container")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Follow)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("follow")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Follow)) + } + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Previous)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("previous")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Previous)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.SinceSeconds == nil { + r.EncodeNil() + } else { + yy19 := *x.SinceSeconds + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(yy19)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sinceSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SinceSeconds == nil { + r.EncodeNil() + } else { + yy21 := *x.SinceSeconds + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(yy21)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.SinceTime == nil { + r.EncodeNil() + } else { + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if z.HasExtensions() && z.EncExt(x.SinceTime) { + } else if yym24 { + z.EncBinaryMarshal(x.SinceTime) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(x.SinceTime) + } else { + z.EncFallback(x.SinceTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sinceTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SinceTime == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else if z.HasExtensions() && z.EncExt(x.SinceTime) { + } else if yym25 { + z.EncBinaryMarshal(x.SinceTime) + } else if !yym25 && z.IsJSONHandle() { + z.EncJSONMarshal(x.SinceTime) + } else { + z.EncFallback(x.SinceTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeBool(bool(x.Timestamps)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timestamps")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.Timestamps)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.TailLines == nil { + r.EncodeNil() + } else { + yy30 := *x.TailLines + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeInt(int64(yy30)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tailLines")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TailLines == nil { + r.EncodeNil() + } else { + yy32 := *x.TailLines + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeInt(int64(yy32)) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.LimitBytes == nil { + r.EncodeNil() + } else { + yy35 := *x.LimitBytes + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + r.EncodeInt(int64(yy35)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("limitBytes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LimitBytes == nil { + r.EncodeNil() + } else { + yy37 := *x.LimitBytes + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + r.EncodeInt(int64(yy37)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodLogOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "container": + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv8 := &x.Container + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "follow": + if r.TryDecodeAsNil() { + x.Follow = false + } else { + yyv10 := &x.Follow + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "previous": + if r.TryDecodeAsNil() { + x.Previous = false + } else { + yyv12 := &x.Previous + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "sinceSeconds": + if r.TryDecodeAsNil() { + if x.SinceSeconds != nil { + x.SinceSeconds = nil + } + } else { + if x.SinceSeconds == nil { + x.SinceSeconds = new(int64) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) + } + } + case "sinceTime": + 
if r.TryDecodeAsNil() { + if x.SinceTime != nil { + x.SinceTime = nil + } + } else { + if x.SinceTime == nil { + x.SinceTime = new(pkg2_v1.Time) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.SinceTime) { + } else if yym17 { + z.DecBinaryUnmarshal(x.SinceTime) + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.SinceTime) + } else { + z.DecFallback(x.SinceTime, false) + } + } + case "timestamps": + if r.TryDecodeAsNil() { + x.Timestamps = false + } else { + yyv18 := &x.Timestamps + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*bool)(yyv18)) = r.DecodeBool() + } + } + case "tailLines": + if r.TryDecodeAsNil() { + if x.TailLines != nil { + x.TailLines = nil + } + } else { + if x.TailLines == nil { + x.TailLines = new(int64) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) + } + } + case "limitBytes": + if r.TryDecodeAsNil() { + if x.LimitBytes != nil { + x.LimitBytes = nil + } + } else { + if x.LimitBytes == nil { + x.LimitBytes = new(int64) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv25 := &x.Kind + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv29 := &x.Container + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Follow = false + } else { + yyv31 := &x.Follow + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.Previous = false + } else { + yyv33 := &x.Previous + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SinceSeconds != nil { + x.SinceSeconds = nil + } + } else { + if x.SinceSeconds == nil { + x.SinceSeconds = new(int64) + } + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SinceTime != nil { + x.SinceTime = nil + } + } else { + if x.SinceTime == nil { + x.SinceTime = new(pkg2_v1.Time) + } + yym38 := z.DecBinary() + _ = yym38 + if false { + } else if z.HasExtensions() && z.DecExt(x.SinceTime) { + } else if yym38 { + z.DecBinaryUnmarshal(x.SinceTime) + } else if !yym38 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.SinceTime) + } else { + z.DecFallback(x.SinceTime, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Timestamps = false + } else { + yyv39 := &x.Timestamps + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TailLines != nil { + x.TailLines = nil + } + } else { + if x.TailLines == nil { + x.TailLines = new(int64) + } + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LimitBytes != nil { + x.LimitBytes = nil + } + } else { + if x.LimitBytes == nil { + x.LimitBytes = new(int64) + } + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = 
x.APIVersion != "" + yyq2[2] = x.Stdin != false + yyq2[3] = x.Stdout != false + yyq2[4] = x.Stderr != false + yyq2[5] = x.TTY != false + yyq2[6] = x.Container != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdin")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdout")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stderr")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("tty")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("container")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAttachOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "stdin": + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv8 := &x.Stdin + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "stdout": + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv10 := &x.Stdout + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "stderr": + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv12 := &x.Stderr + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "tty": + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv14 := &x.TTY + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case 
"container": + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv16 := &x.Container + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv21 := &x.APIVersion + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv23 := &x.Stdin + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv25 := &x.Stdout + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv27 := &x.Stderr + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv29 := &x.TTY + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv31 := &x.Container + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l 
+ } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Stdin != false + yyq2[3] = x.Stdout != false + yyq2[4] = x.Stderr != false + yyq2[5] = x.TTY != false + yyq2[6] = x.Container != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdin")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdout")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + 
r.EncodeBool(bool(x.Stderr)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stderr")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tty")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("container")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("command")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodExecOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "stdin": + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv8 := &x.Stdin + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "stdout": + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv10 := &x.Stdout + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "stderr": + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv12 := &x.Stderr + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "tty": + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv14 := &x.TTY + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "container": + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv16 := &x.Container + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + case "command": + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv18 := &x.Command + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + z.F.DecSliceStringX(yyv18, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv21 := &x.Kind + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv23 := &x.APIVersion + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv25 := &x.Stdin + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv27 := &x.Stdout + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv29 := &x.Stderr + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv31 := &x.TTY + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv33 := &x.Container + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv35 := &x.Command + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + z.F.DecSliceStringX(yyv35, false, d) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj20-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodPortForwardOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = len(x.Ports) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { 
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceInt32V(x.Ports, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceInt32V(x.Ports, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodPortForwardOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodPortForwardOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv8 := &x.Ports + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceInt32X(yyv8, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodPortForwardOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv15 := &x.Ports + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + z.F.DecSliceInt32X(yyv15, false, d) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return 
+ } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } 
else { + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.Namespace != "" + yyq2[2] = x.Name != "" + yyq2[3] = x.UID != "" + yyq2[4] = x.APIVersion != "" + yyq2[5] = x.ResourceVersion != "" + yyq2[6] = x.FieldPath != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) 
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv6 := &x.Namespace + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv10 := &x.UID + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(yyv10) { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv12 := &x.APIVersion + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "resourceVersion": + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "fieldPath": + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv16 := &x.FieldPath + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv21 := &x.Namespace + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv23 := &x.Name + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv25 
:= &x.UID + yym26 := z.DecBinary() + _ = yym26 + if false { + } else if z.HasExtensions() && z.DecExt(yyv25) { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv29 := &x.ResourceVersion + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv31 := &x.FieldPath + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LocalObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if 
yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv7 := &x.Name + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Reference + yy10.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reference")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Reference + yy12.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SerializedReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "reference": + if r.TryDecodeAsNil() { + x.Reference = ObjectReference{} + } else { + yyv8 := &x.Reference + yyv8.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = 
h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv10 := &x.Kind + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv12 := &x.APIVersion + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reference = ObjectReference{} + } else { + yyv14 := &x.Reference + yyv14.CodecDecodeSelf(d) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Component != "" + yyq2[1] = x.Host != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Component)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("component")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Component)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("host")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EventSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "component": + if r.TryDecodeAsNil() { + x.Component = "" + } else { + yyv4 := &x.Component + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "host": + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv6 := &x.Host + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Component = "" + } else { + yyv9 := &x.Component + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv11 := &x.Host + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + 
} else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [11]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + yyq2[6] = true + yyq2[7] = true + yyq2[8] = true + yyq2[9] = x.Count != 0 + yyq2[10] = x.Type != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(11) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.InvolvedObject + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("involvedObject")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.InvolvedObject + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + 
yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yy26 := &x.Source + yy26.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("source")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy28 := &x.Source + yy28.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yy31 := &x.FirstTimestamp + yym32 := z.EncBinary() + _ = yym32 + if false { + } else if z.HasExtensions() && z.EncExt(yy31) { + } else if yym32 { + z.EncBinaryMarshal(yy31) + } else if !yym32 && z.IsJSONHandle() { + z.EncJSONMarshal(yy31) + } else { + z.EncFallback(yy31) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("firstTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy33 := &x.FirstTimestamp + yym34 := z.EncBinary() + _ = yym34 + if false { + } else if z.HasExtensions() && z.EncExt(yy33) { + } else if yym34 { + z.EncBinaryMarshal(yy33) + } else if !yym34 && z.IsJSONHandle() { + z.EncJSONMarshal(yy33) + } else { + z.EncFallback(yy33) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yy36 := &x.LastTimestamp + yym37 := z.EncBinary() + _ = yym37 + if false { + } else if z.HasExtensions() && z.EncExt(yy36) { + } else if yym37 { + z.EncBinaryMarshal(yy36) + } else if !yym37 && z.IsJSONHandle() { + z.EncJSONMarshal(yy36) + } else { + z.EncFallback(yy36) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy38 := &x.LastTimestamp + yym39 := z.EncBinary() + _ = yym39 + if false { + } else if z.HasExtensions() && z.EncExt(yy38) { + } else if yym39 { + z.EncBinaryMarshal(yy38) + } else if !yym39 && z.IsJSONHandle() { + z.EncJSONMarshal(yy38) + } else { + z.EncFallback(yy38) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeInt(int64(x.Count)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("count")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym42 := z.EncBinary() + _ = yym42 + if false { + } else { + r.EncodeInt(int64(x.Count)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Event) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "involvedObject": + if r.TryDecodeAsNil() { + x.InvolvedObject = ObjectReference{} + } else { + yyv10 := &x.InvolvedObject + yyv10.CodecDecodeSelf(d) + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv11 := &x.Reason + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv13 := &x.Message + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "source": + if r.TryDecodeAsNil() { + x.Source = EventSource{} + } else { + yyv15 := &x.Source + yyv15.CodecDecodeSelf(d) + } + case "firstTimestamp": + if r.TryDecodeAsNil() { + x.FirstTimestamp = pkg2_v1.Time{} + } else { + yyv16 := &x.FirstTimestamp + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else if yym17 { + z.DecBinaryUnmarshal(yyv16) + } 
else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv16) + } else { + z.DecFallback(yyv16, false) + } + } + case "lastTimestamp": + if r.TryDecodeAsNil() { + x.LastTimestamp = pkg2_v1.Time{} + } else { + yyv18 := &x.LastTimestamp + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else if yym19 { + z.DecBinaryUnmarshal(yyv18) + } else if !yym19 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv18) + } else { + z.DecFallback(yyv18, false) + } + } + case "count": + if r.TryDecodeAsNil() { + x.Count = 0 + } else { + yyv20 := &x.Count + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int32)(yyv20)) = int32(r.DecodeInt(32)) + } + } + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv22 := &x.Type + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv25 := &x.Kind + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv29 := &x.ObjectMeta + yym30 := z.DecBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.DecExt(yyv29) { + } else { + z.DecFallback(yyv29, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InvolvedObject = ObjectReference{} + } else { + yyv31 := &x.InvolvedObject + yyv31.CodecDecodeSelf(d) + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv32 := &x.Reason + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } 
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv34 := &x.Message + yym35 := z.DecBinary() + _ = yym35 + if false { + } else { + *((*string)(yyv34)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Source = EventSource{} + } else { + yyv36 := &x.Source + yyv36.CodecDecodeSelf(d) + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FirstTimestamp = pkg2_v1.Time{} + } else { + yyv37 := &x.FirstTimestamp + yym38 := z.DecBinary() + _ = yym38 + if false { + } else if z.HasExtensions() && z.DecExt(yyv37) { + } else if yym38 { + z.DecBinaryUnmarshal(yyv37) + } else if !yym38 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv37) + } else { + z.DecFallback(yyv37, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTimestamp = pkg2_v1.Time{} + } else { + yyv39 := &x.LastTimestamp + yym40 := z.DecBinary() + _ = yym40 + if false { + } else if z.HasExtensions() && z.DecExt(yyv39) { + } else if yym40 { + z.DecBinaryUnmarshal(yyv39) + } else if !yym40 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv39) + } else { + z.DecFallback(yyv39, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Count = 0 + } else { + yyv41 := &x.Count + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*int32)(yyv41)) = int32(r.DecodeInt(32)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv43 := &x.Type + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 
{ + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceEvent(([]Event)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceEvent(([]Event)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EventList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EventList) codecDecodeSelfFromMap(l int, 
d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceEvent((*[]Event)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceEvent((*[]Event)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 
> l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *List) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { 
+ *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x LimitType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *LimitType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = len(x.Max) != 0 + yyq2[2] = len(x.Min) != 0 + yyq2[3] = len(x.Default) != 0 + yyq2[4] = len(x.DefaultRequest) != 0 + yyq2[5] = len(x.MaxLimitRequestRatio) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Max == nil { + r.EncodeNil() + } else { + x.Max.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("max")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Max == nil 
{ + r.EncodeNil() + } else { + x.Max.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Min == nil { + r.EncodeNil() + } else { + x.Min.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("min")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Min == nil { + r.EncodeNil() + } else { + x.Min.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Default == nil { + r.EncodeNil() + } else { + x.Default.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("default")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Default == nil { + r.EncodeNil() + } else { + x.Default.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.DefaultRequest == nil { + r.EncodeNil() + } else { + x.DefaultRequest.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultRequest")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultRequest == nil { + r.EncodeNil() + } else { + x.DefaultRequest.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.MaxLimitRequestRatio == nil { + r.EncodeNil() + } else { + x.MaxLimitRequestRatio.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxLimitRequestRatio")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxLimitRequestRatio == nil { + r.EncodeNil() + } else { + x.MaxLimitRequestRatio.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRangeItem) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "max": + if r.TryDecodeAsNil() { + x.Max = nil + } else { + yyv5 := &x.Max + yyv5.CodecDecodeSelf(d) + } + case "min": + if r.TryDecodeAsNil() { + x.Min = nil + } else { + yyv6 := &x.Min + yyv6.CodecDecodeSelf(d) + } + case "default": + if r.TryDecodeAsNil() { + x.Default = nil + } else { + yyv7 := &x.Default + yyv7.CodecDecodeSelf(d) + } + case "defaultRequest": + if r.TryDecodeAsNil() { + x.DefaultRequest = nil + } else { + yyv8 := &x.DefaultRequest + yyv8.CodecDecodeSelf(d) + } + case "maxLimitRequestRatio": + if r.TryDecodeAsNil() { + x.MaxLimitRequestRatio = nil + } else { + yyv9 := &x.MaxLimitRequestRatio + yyv9.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv11 := &x.Type + yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Max = nil + } else { + yyv12 := &x.Max + yyv12.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Min = nil + } else { + yyv13 := &x.Min + yyv13.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Default = nil + } else { + yyv14 := &x.Default + yyv14.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DefaultRequest = nil + } else { + yyv15 := &x.DefaultRequest + yyv15.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MaxLimitRequestRatio = nil + } else { + yyv16 := &x.MaxLimitRequestRatio + yyv16.CodecDecodeSelf(d) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Limits == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("limits")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Limits == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRangeSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "limits": + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv4 := &x.Limits + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + 
var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv7 := &x.Limits + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRange) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = LimitRangeSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = LimitRangeSpec{} + } else { + yyv18 := &x.Spec + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + 
z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceLimitRange(([]LimitRange)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceLimitRange(([]LimitRange)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRangeList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceLimitRange((*[]LimitRange)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceLimitRange((*[]LimitRange)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hard) != 0 + yyq2[1] = len(x.Scopes) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b 
:= range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hard")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Scopes == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scopes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Scopes == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuotaSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hard": + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv4 := &x.Hard + yyv4.CodecDecodeSelf(d) + } + case "scopes": + if r.TryDecodeAsNil() { + x.Scopes = nil + } else { + yyv5 := &x.Scopes + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var 
yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv8 := &x.Hard + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Scopes = nil + } else { + yyv9 := &x.Scopes + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hard) != 0 + yyq2[1] = len(x.Used) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hard")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Used == nil { + r.EncodeNil() + } else { + x.Used.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("used")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Used == nil { + r.EncodeNil() + } else { + x.Used.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuotaStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hard": + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv4 := &x.Hard + yyv4.CodecDecodeSelf(d) + } + case "used": + if r.TryDecodeAsNil() { + x.Used = nil + } else { + yyv5 := &x.Used + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv7 := &x.Hard + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Used = nil + } else { + yyv8 := &x.Used + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuota) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l 
>= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ResourceQuotaSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ResourceQuotaStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ResourceQuotaSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ResourceQuotaStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + 
r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuotaList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceResourceQuota((*[]ResourceQuota)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceResourceQuota((*[]ResourceQuota)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + yyq2[4] = len(x.StringData) != 0 + yyq2[5] = x.Type != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + 
if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Data == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.StringData == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + z.F.EncMapStringStringV(x.StringData, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stringData")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StringData == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + z.F.EncMapStringStringV(x.StringData, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Secret) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode 
into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decMapstringSliceuint8((*map[string][]uint8)(yyv10), d) + } + } + case "stringData": + if r.TryDecodeAsNil() { + x.StringData = nil + } else { + yyv12 := &x.StringData + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecMapStringStringX(yyv12, false, d) + } + } + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv14 := &x.Type + yyv14.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv16 := &x.Kind + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv18 := &x.APIVersion + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv20 := &x.ObjectMeta + yym21 := z.DecBinary() + _ = yym21 + if false { + } else if z.HasExtensions() && z.DecExt(yyv20) { + } else { + z.DecFallback(yyv20, false) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + 
} else { + yyv22 := &x.Data + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + h.decMapstringSliceuint8((*map[string][]uint8)(yyv22), d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StringData = nil + } else { + yyv24 := &x.StringData + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + z.F.DecMapStringStringX(yyv24, false, d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv26 := &x.Type + yyv26.CodecDecodeSelf(d) + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x SecretType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *SecretType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSecret(([]Secret)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSecret(([]Secret)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() 
+ _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSecret((*[]Secret)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceSecret((*[]Secret)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + 
yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Data == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + z.F.EncMapStringStringV(x.Data, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncMapStringStringV(x.Data, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMap) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMap) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = 
h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecMapStringStringX(yyv10, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv19 := &x.Data + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecMapStringStringX(yyv19, false, d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceConfigMap(([]ConfigMap)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceConfigMap(([]ConfigMap)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + 
+func (x *ConfigMapList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceConfigMap((*[]ConfigMap)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { 
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceConfigMap((*[]ConfigMap)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ComponentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ComponentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.Message != "" + yyq2[3] = x.Error != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Error)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("error")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Error)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ComponentCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "error": + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv8 := &x.Error + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv11 := &x.Type + yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv13 := &x.Message + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv15 := &x.Error + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ComponentStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := 
z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv10 := &x.Conditions + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceComponentCondition((*[]ComponentCondition)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv19 := &x.Conditions + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceComponentCondition((*[]ComponentCondition)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ComponentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceComponentStatus((*[]ComponentStatus)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceComponentStatus((*[]ComponentStatus)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 
{ + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Items) != 0 + yyq2[1] = x.DefaultMode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy7 := *x.DefaultMode + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy9 := *x.DefaultMode + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DownwardAPIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DownwardAPIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default 
slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DownwardAPIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv9 := &x.Items + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FieldRef != nil + yyq2[2] = x.ResourceFieldRef != nil + yyq2[3] = x.Mode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Mode == nil { + r.EncodeNil() + } else { + yy13 := *x.Mode + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(yy13)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Mode == nil { + r.EncodeNil() + } else { + yy15 := *x.Mode + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(yy15)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DownwardAPIVolumeFile) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case 
"path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fieldRef": + if r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + case "resourceFieldRef": + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + case "mode": + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv11 := &x.Path + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DownwardAPIProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else 
{ + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Items) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DownwardAPIProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DownwardAPIProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DownwardAPIProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Capabilities != nil + yyq2[1] = x.Privileged != nil + yyq2[2] = x.SELinuxOptions != nil + yyq2[3] = x.RunAsUser != nil + yyq2[4] = x.RunAsNonRoot != nil + yyq2[5] = x.ReadOnlyRootFilesystem != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Capabilities == nil { + r.EncodeNil() + } else { + x.Capabilities.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capabilities == nil { + r.EncodeNil() + } else { + x.Capabilities.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Privileged == nil { + r.EncodeNil() + } else { + yy7 := *x.Privileged + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("privileged")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Privileged == nil { + r.EncodeNil() + } else { + yy9 := *x.Privileged + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy15 := *x.RunAsUser + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else { + r.EncodeInt(int64(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] 
{ + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy17 := *x.RunAsUser + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else { + r.EncodeInt(int64(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy20 := *x.RunAsNonRoot + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeBool(bool(yy20)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy22 := *x.RunAsNonRoot + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(yy22)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.ReadOnlyRootFilesystem == nil { + r.EncodeNil() + } else { + yy25 := *x.ReadOnlyRootFilesystem + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ReadOnlyRootFilesystem == nil { + r.EncodeNil() + } else { + yy27 := *x.ReadOnlyRootFilesystem + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "capabilities": + if r.TryDecodeAsNil() { + if x.Capabilities != nil { + x.Capabilities = nil + } + } else { + if 
x.Capabilities == nil { + x.Capabilities = new(Capabilities) + } + x.Capabilities.CodecDecodeSelf(d) + } + case "privileged": + if r.TryDecodeAsNil() { + if x.Privileged != nil { + x.Privileged = nil + } + } else { + if x.Privileged == nil { + x.Privileged = new(bool) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*bool)(x.Privileged)) = r.DecodeBool() + } + } + case "seLinuxOptions": + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + case "runAsUser": + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(pkg1_types.UnixUserID) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(x.RunAsUser) { + } else { + *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) + } + } + case "runAsNonRoot": + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + case "readOnlyRootFilesystem": + if r.TryDecodeAsNil() { + if x.ReadOnlyRootFilesystem != nil { + x.ReadOnlyRootFilesystem = nil + } + } else { + if x.ReadOnlyRootFilesystem == nil { + x.ReadOnlyRootFilesystem = new(bool) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Capabilities != nil { + x.Capabilities = nil + } + } else { + if x.Capabilities == nil { + x.Capabilities = new(Capabilities) + } + x.Capabilities.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Privileged != nil { + x.Privileged = nil + } + } else { + if x.Privileged == nil { + x.Privileged = new(bool) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*bool)(x.Privileged)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + 
} + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(pkg1_types.UnixUserID) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(x.RunAsUser) { + } else { + *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ReadOnlyRootFilesystem != nil { + x.ReadOnlyRootFilesystem = nil + } + } else { + if x.ReadOnlyRootFilesystem == nil { + x.ReadOnlyRootFilesystem = new(bool) + } + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.User != "" + yyq2[1] = x.Role != "" + yyq2[2] = x.Type != "" + yyq2[3] = x.Level != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Role)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("role")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Role)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Level)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("level")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Level)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SELinuxOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv4 := &x.User + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "role": + if r.TryDecodeAsNil() { + x.Role = "" + } else { + yyv6 := &x.Role + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv8 := &x.Type + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "level": + if 
r.TryDecodeAsNil() { + x.Level = "" + } else { + yyv10 := &x.Level + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv13 := &x.User + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Role = "" + } else { + yyv15 := &x.Role + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv17 := &x.Type + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Level = "" + } else { + yyv19 := &x.Level + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Range)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("range")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Range)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RangeAllocation) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "range": + if r.TryDecodeAsNil() { + x.Range = "" + } else { + yyv10 := &x.Range + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv12 := &x.Data + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *yyv12 = r.DecodeBytes(*(*[]byte)(yyv12), false, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv19 := &x.ObjectMeta + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + 
z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Range = "" + } else { + yyv21 := &x.Range + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv23 := &x.Data + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *yyv23 = r.DecodeBytes(*(*[]byte)(yyv23), false, false) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Sysctl) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Sysctl) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() 
+ if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Sysctl) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "Name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Sysctl) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv11 := &x.Value + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeResources) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("Capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeResources) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeResources) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "Capacity": + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv4 := &x.Capacity + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeResources) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv6 := &x.Capacity + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicev1_OwnerReference(v []pkg2_v1.OwnerReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yym3 := z.EncBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.EncExt(yy2) { + } else { + z.EncFallback(yy2) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_OwnerReference(v *[]pkg2_v1.OwnerReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg2_v1.OwnerReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg2_v1.OwnerReference, yyrl1) + } + } else { + yyv1 = make([]pkg2_v1.OwnerReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.OwnerReference{} + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.DecExt(yyv2) { + } else { + z.DecFallback(yyv2, false) + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg2_v1.OwnerReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.OwnerReference{} + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg2_v1.OwnerReference{}) // var yyz1 pkg2_v1.OwnerReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.OwnerReference{} + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else { + z.DecFallback(yyv6, false) + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg2_v1.OwnerReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolumeAccessMode, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolumeAccessMode{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PersistentVolumeAccessMode, yyrl1) + } + } else { + yyv1 = make([]PersistentVolumeAccessMode, yyrl1) + } 
+ yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 PersistentVolumeAccessMode + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolumeAccessMode{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePersistentVolume(v []PersistentVolume, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 552) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PersistentVolume, yyrl1) + } + } else { + yyv1 = make([]PersistentVolume, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolume{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PersistentVolume{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolume{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PersistentVolume{}) // var yyz1 PersistentVolume + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolume{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolume{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) 
encSlicePersistentVolumeClaim(v []PersistentVolumeClaim, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClaim, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolumeClaim{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 384) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PersistentVolumeClaim, yyrl1) + } + } else { + yyv1 = make([]PersistentVolumeClaim, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolumeClaim{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PersistentVolumeClaim{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolumeClaim{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PersistentVolumeClaim{}) // var yyz1 PersistentVolumeClaim + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolumeClaim{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolumeClaim{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []KeyToPath{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 
= yyv1[:yyrl1] + } else { + yyv1 = make([]KeyToPath, yyrl1) + } + } else { + yyv1 = make([]KeyToPath, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = KeyToPath{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, KeyToPath{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = KeyToPath{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, KeyToPath{}) // var yyz1 KeyToPath + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = KeyToPath{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []KeyToPath{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceVolumeProjection(v []VolumeProjection, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceVolumeProjection(v *[]VolumeProjection, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []VolumeProjection{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]VolumeProjection, yyrl1) + } + } else { + yyv1 = make([]VolumeProjection, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, VolumeProjection{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, VolumeProjection{}) // var yyz1 VolumeProjection + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if 
yyj1 == 0 && yyv1 == nil { + yyv1 = []VolumeProjection{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHTTPHeader(v []HTTPHeader, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HTTPHeader{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HTTPHeader, yyrl1) + } + } else { + yyv1 = make([]HTTPHeader, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPHeader{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HTTPHeader{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPHeader{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HTTPHeader{}) // var yyz1 HTTPHeader + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPHeader{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HTTPHeader{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCapability(v []Capability, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCapability(v *[]Capability, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Capability{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Capability, 
yyrl1) + } + } else { + yyv1 = make([]Capability, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 Capability + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Capability{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainerPort(v []ContainerPort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ContainerPort, yyrl1) + } + } else { + yyv1 = make([]ContainerPort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerPort{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ContainerPort{}) // var yyz1 ContainerPort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerPort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEnvFromSource(v 
[]EnvFromSource, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEnvFromSource(v *[]EnvFromSource, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EnvFromSource{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EnvFromSource, yyrl1) + } + } else { + yyv1 = make([]EnvFromSource, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvFromSource{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EnvFromSource{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvFromSource{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EnvFromSource{}) // var yyz1 EnvFromSource + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvFromSource{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EnvFromSource{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEnvVar(v []EnvVar, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EnvVar{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EnvVar, yyrl1) + } + } else { + yyv1 = make([]EnvVar, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 
{ + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvVar{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EnvVar{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvVar{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EnvVar{}) // var yyz1 EnvVar + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvVar{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EnvVar{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceVolumeMount(v []VolumeMount, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []VolumeMount{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]VolumeMount, yyrl1) + } + } else { + yyv1 = make([]VolumeMount, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeMount{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, VolumeMount{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeMount{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, VolumeMount{}) // var yyz1 VolumeMount + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeMount{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []VolumeMount{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeSelectorTerm(v []NodeSelectorTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + 
z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeSelectorTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeSelectorTerm, yyrl1) + } + } else { + yyv1 = make([]NodeSelectorTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeSelectorTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeSelectorTerm{}) // var yyz1 NodeSelectorTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeSelectorTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeSelectorRequirement(v []NodeSelectorRequirement, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequirement, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeSelectorRequirement{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeSelectorRequirement, yyrl1) + } + } else { + yyv1 = 
make([]NodeSelectorRequirement, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorRequirement{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeSelectorRequirement{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorRequirement{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeSelectorRequirement{}) // var yyz1 NodeSelectorRequirement + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorRequirement{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeSelectorRequirement{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = 
true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulingTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PreferredSchedulingTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var 
yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PreferredSchedulingTerm, yyrl1) + } + } else { + yyv1 = make([]PreferredSchedulingTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferredSchedulingTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PreferredSchedulingTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferredSchedulingTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PreferredSchedulingTerm{}) // var yyz1 PreferredSchedulingTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferredSchedulingTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PreferredSchedulingTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceVolume(v []Volume, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Volume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 232) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Volume, yyrl1) + } + } else { + yyv1 = make([]Volume, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Volume{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Volume{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Volume{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Volume{}) // var yyz1 Volume + yyc1 = true + } + 
yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Volume{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Volume{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainer(v []Container, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Container{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Container, yyrl1) + } + } else { + yyv1 = make([]Container, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Container{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Container{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Container{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Container{}) // var yyz1 Container + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Container{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Container{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLocalObjectReference(v []LocalObjectReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LocalObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = 
true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LocalObjectReference, yyrl1) + } + } else { + yyv1 = make([]LocalObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LocalObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LocalObjectReference{}) // var yyz1 LocalObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LocalObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceToleration(v []Toleration, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceToleration(v *[]Toleration, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Toleration{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Toleration, yyrl1) + } + } else { + yyv1 = make([]Toleration, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Toleration{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Toleration{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Toleration{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 
Toleration{}) // var yyz1 Toleration + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Toleration{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Toleration{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHostAlias(v []HostAlias, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHostAlias(v *[]HostAlias, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HostAlias{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HostAlias, yyrl1) + } + } else { + yyv1 = make([]HostAlias, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HostAlias{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HostAlias{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HostAlias{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HostAlias{}) // var yyz1 HostAlias + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HostAlias{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HostAlias{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicetypes_UnixGroupID(v []pkg1_types.UnixGroupID, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else if z.HasExtensions() && z.EncExt(yyv1) { + } else { + r.EncodeInt(int64(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicetypes_UnixGroupID(v *[]pkg1_types.UnixGroupID, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var 
yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg1_types.UnixGroupID{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg1_types.UnixGroupID, yyrl1) + } + } else { + yyv1 = make([]pkg1_types.UnixGroupID, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = 0 + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.DecExt(yyv2) { + } else { + *((*int64)(yyv2)) = int64(r.DecodeInt(64)) + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, 0) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = 0 + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 0) // var yyz1 pkg1_types.UnixGroupID + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = 0 + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else { + *((*int64)(yyv6)) = int64(r.DecodeInt(64)) + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg1_types.UnixGroupID{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodCondition(v []PodCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodCondition, yyrl1) + } + } else { + yyv1 = make([]PodCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodCondition{} + } else { + yyv2 := 
&yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodCondition{}) // var yyz1 PodCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainerStatus(v []ContainerStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ContainerStatus, yyrl1) + } + } else { + yyv1 = make([]ContainerStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ContainerStatus{}) // var yyz1 ContainerStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePod(v []Pod, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + 
yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Pod{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Pod, yyrl1) + } + } else { + yyv1 = make([]Pod, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Pod{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Pod{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Pod{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Pod{}) // var yyz1 Pod + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Pod{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Pod{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodTemplate(v []PodTemplate, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodTemplate{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 824) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodTemplate, yyrl1) + } + } else { + yyv1 = make([]PodTemplate, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodTemplate{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < 
yyl1; yyj1++ { + yyv1 = append(yyv1, PodTemplate{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodTemplate{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodTemplate{}) // var yyz1 PodTemplate + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodTemplate{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodTemplate{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceReplicationControllerCondition(v []ReplicationControllerCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceReplicationControllerCondition(v *[]ReplicationControllerCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicationControllerCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 88) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ReplicationControllerCondition, yyrl1) + } + } else { + yyv1 = make([]ReplicationControllerCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationControllerCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicationControllerCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationControllerCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicationControllerCondition{}) // var yyz1 ReplicationControllerCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationControllerCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicationControllerCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceReplicationController(v []ReplicationController, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationController, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicationController{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ReplicationController, yyrl1) + } + } else { + yyv1 = make([]ReplicationController, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationController{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicationController{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationController{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicationController{}) // var yyz1 ReplicationController + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationController{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicationController{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLoadBalancerIngress(v []LoadBalancerIngress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LoadBalancerIngress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LoadBalancerIngress, 
yyrl1) + } + } else { + yyv1 = make([]LoadBalancerIngress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LoadBalancerIngress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LoadBalancerIngress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LoadBalancerIngress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LoadBalancerIngress{}) // var yyz1 LoadBalancerIngress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LoadBalancerIngress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LoadBalancerIngress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceServicePort(v []ServicePort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ServicePort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ServicePort, yyrl1) + } + } else { + yyv1 = make([]ServicePort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServicePort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ServicePort{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServicePort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ServicePort{}) // var yyz1 ServicePort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServicePort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = 
[]ServicePort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceService(v []Service, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Service{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 472) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Service, yyrl1) + } + } else { + yyv1 = make([]Service, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Service{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Service{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Service{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Service{}) // var yyz1 Service + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Service{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Service{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceObjectReference(v []ObjectReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = 
make([]ObjectReference, yyrl1) + } + } else { + yyv1 = make([]ObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ObjectReference{}) // var yyz1 ObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceServiceAccount(v []ServiceAccount, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ServiceAccount{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 320) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ServiceAccount, yyrl1) + } + } else { + yyv1 = make([]ServiceAccount, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServiceAccount{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ServiceAccount{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServiceAccount{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ServiceAccount{}) // var yyz1 ServiceAccount + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServiceAccount{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 
== 0 && yyv1 == nil { + yyv1 = []ServiceAccount{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpointSubset(v []EndpointSubset, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EndpointSubset{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EndpointSubset, yyrl1) + } + } else { + yyv1 = make([]EndpointSubset, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointSubset{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointSubset{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointSubset{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointSubset{}) // var yyz1 EndpointSubset + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointSubset{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointSubset{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpointAddress(v []EndpointAddress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EndpointAddress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, 
z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EndpointAddress, yyrl1) + } + } else { + yyv1 = make([]EndpointAddress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointAddress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointAddress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointAddress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointAddress{}) // var yyz1 EndpointAddress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointAddress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointAddress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpointPort(v []EndpointPort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EndpointPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EndpointPort, yyrl1) + } + } else { + yyv1 = make([]EndpointPort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointPort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointPort{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointPort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointPort{}) // var yyz1 EndpointPort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointPort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + 
z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointPort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpoints(v []Endpoints, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Endpoints{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Endpoints, yyrl1) + } + } else { + yyv1 = make([]Endpoints, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Endpoints{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Endpoints{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Endpoints{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Endpoints{}) // var yyz1 Endpoints + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Endpoints{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Endpoints{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceTaint(v []Taint, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceTaint(v *[]Taint, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Taint{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, 
z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Taint, yyrl1) + } + } else { + yyv1 = make([]Taint, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Taint{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Taint{}) // var yyz1 Taint + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Taint{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeCondition(v []NodeCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeCondition, yyrl1) + } + } else { + yyv1 = make([]NodeCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeCondition{}) // var yyz1 NodeCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } 
else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeAddress(v []NodeAddress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeAddress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeAddress, yyrl1) + } + } else { + yyv1 = make([]NodeAddress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeAddress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeAddress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeAddress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeAddress{}) // var yyz1 NodeAddress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeAddress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeAddress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainerImage(v []ContainerImage, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerImage{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if 
yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ContainerImage, yyrl1) + } + } else { + yyv1 = make([]ContainerImage, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerImage{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerImage{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerImage{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ContainerImage{}) // var yyz1 ContainerImage + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerImage{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerImage{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []UniqueVolumeName{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]UniqueVolumeName, yyrl1) + } + } else { + yyv1 = make([]UniqueVolumeName, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []UniqueVolumeName{} + yyc1 = true + } + } + 
yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []AttachedVolume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]AttachedVolume, yyrl1) + } + } else { + yyv1 = make([]AttachedVolume, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = AttachedVolume{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, AttachedVolume{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = AttachedVolume{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = AttachedVolume{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []AttachedVolume{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePreferAvoidPodsEntry(v []PreferAvoidPodsEntry, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePreferAvoidPodsEntry(v *[]PreferAvoidPodsEntry, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PreferAvoidPodsEntry{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if 
yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PreferAvoidPodsEntry, yyrl1) + } + } else { + yyv1 = make([]PreferAvoidPodsEntry, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferAvoidPodsEntry{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PreferAvoidPodsEntry{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferAvoidPodsEntry{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PreferAvoidPodsEntry{}) // var yyz1 PreferAvoidPodsEntry + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferAvoidPodsEntry{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PreferAvoidPodsEntry{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encResourceList(v ResourceList, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yyk1.CodecEncodeSelf(e) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy3 := &yyv1 + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(yy3) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(yy3) + } else { + z.EncFallback(yy3) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72) + yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1) + *v = yyv1 + } + var yymk1 ResourceName + var yymv1 pkg3_resource.Quantity + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yyv2.CodecDecodeSelf(d) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = pkg3_resource.Quantity{} + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = pkg3_resource.Quantity{} + } else { + yyv3 := &yymv1 + yym4 := z.DecBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.DecExt(yyv3) { + } else if !yym4 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv3) + } else { + z.DecFallback(yyv3, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yyv5.CodecDecodeSelf(d) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = 
pkg3_resource.Quantity{} + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = pkg3_resource.Quantity{} + } else { + yyv6 := &yymv1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSliceNode(v []Node, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Node{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 664) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Node, yyrl1) + } + } else { + yyv1 = make([]Node, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Node{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Node{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Node{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Node{}) // var yyz1 Node + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Node{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Node{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceFinalizerName(v []FinalizerName, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceFinalizerName(v *[]FinalizerName, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []FinalizerName{} + yyc1 = true + } 
else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]FinalizerName, yyrl1) + } + } else { + yyv1 = make([]FinalizerName, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 FinalizerName + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []FinalizerName{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNamespace(v []Namespace, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Namespace{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 304) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Namespace, yyrl1) + } + } else { + yyv1 = make([]Namespace, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Namespace{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Namespace{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Namespace{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Namespace{}) // var yyz1 Namespace + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Namespace{} + } else { + 
yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Namespace{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEvent(v []Event, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Event{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 512) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Event, yyrl1) + } + } else { + yyv1 = make([]Event, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Event{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Event{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Event{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Event{}) // var yyz1 Event + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Event{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Event{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceruntime_RawExtension(v []pkg5_runtime.RawExtension, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yym3 := z.EncBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.EncExt(yy2) { + } else if !yym3 && z.IsJSONHandle() { + z.EncJSONMarshal(yy2) + } else { + z.EncFallback(yy2) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg5_runtime.RawExtension, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg5_runtime.RawExtension{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = 
yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) + } + } else { + yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg5_runtime.RawExtension{} + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.DecExt(yyv2) { + } else if !yym3 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv2) + } else { + z.DecFallback(yyv2, false) + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg5_runtime.RawExtension{} + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) // var yyz1 pkg5_runtime.RawExtension + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg5_runtime.RawExtension{} + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg5_runtime.RawExtension{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLimitRangeItem(v []LimitRangeItem, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LimitRangeItem{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LimitRangeItem, yyrl1) + } + } else { + yyv1 = make([]LimitRangeItem, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + 
copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRangeItem{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LimitRangeItem{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRangeItem{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LimitRangeItem{}) // var yyz1 LimitRangeItem + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRangeItem{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LimitRangeItem{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLimitRange(v []LimitRange, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LimitRange{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LimitRange, yyrl1) + } + } else { + yyv1 = make([]LimitRange, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRange{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LimitRange{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRange{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LimitRange{}) // var yyz1 LimitRange + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRange{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LimitRange{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e 
*codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ResourceQuotaScope{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ResourceQuotaScope, yyrl1) + } + } else { + yyv1 = make([]ResourceQuotaScope, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ResourceQuotaScope{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ResourceQuota{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 312) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ResourceQuota, yyrl1) + } + } else { + yyv1 = make([]ResourceQuota, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = 
true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ResourceQuota{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ResourceQuota{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ResourceQuota{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ResourceQuota{}) // var yyz1 ResourceQuota + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ResourceQuota{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ResourceQuota{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encMapstringSliceuint8(v map[string][]uint8, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yym3 := z.EncBinary() + _ = yym3 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv1)) + } + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringSliceuint8(v *map[string][]uint8, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string][]uint8, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 []uint8 + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv6 := &yymk1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv8 := &yymv1 + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *yyv8 = r.DecodeBytes(*(*[]byte)(yyv8), false, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } 
+ } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSliceuint8(v []uint8, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(v)) +} + +func (x codecSelfer1234) decSliceuint8(v *[]uint8, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + *v = r.DecodeBytes(*((*[]byte)(v)), false, false) +} + +func (x codecSelfer1234) encSliceSecret(v []Secret, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Secret{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Secret, yyrl1) + } + } else { + yyv1 = make([]Secret, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Secret{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Secret{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Secret{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Secret{}) // var yyz1 Secret + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Secret{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Secret{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceConfigMap(v []ConfigMap, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ConfigMap{} + 
yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 272) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ConfigMap, yyrl1) + } + } else { + yyv1 = make([]ConfigMap, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ConfigMap{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ConfigMap{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ConfigMap{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ConfigMap{}) // var yyz1 ConfigMap + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ConfigMap{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ConfigMap{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceComponentCondition(v []ComponentCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ComponentCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ComponentCondition, yyrl1) + } + } else { + yyv1 = make([]ComponentCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ComponentCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if 
yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ComponentCondition{}) // var yyz1 ComponentCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ComponentCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceComponentStatus(v []ComponentStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ComponentStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ComponentStatus, yyrl1) + } + } else { + yyv1 = make([]ComponentStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ComponentStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ComponentStatus{}) // var yyz1 ComponentStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ComponentStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, 
z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DownwardAPIVolumeFile{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]DownwardAPIVolumeFile, yyrl1) + } + } else { + yyv1 = make([]DownwardAPIVolumeFile, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DownwardAPIVolumeFile{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DownwardAPIVolumeFile{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DownwardAPIVolumeFile{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = DownwardAPIVolumeFile{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DownwardAPIVolumeFile{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types.go b/vendor/k8s.io/client-go/pkg/api/v1/types.go new file mode 100644 index 000000000..ba7723296 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/types.go @@ -0,0 +1,4617 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// The comments for the structs and fields can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored and not exported to the SwaggerAPI. 
+// +// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh + +// Common string formats +// --------------------- +// Many fields in this API have formatting requirements. The commonly used +// formats are defined here. +// +// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" +// in the C language. This is captured by the following regex: +// [A-Za-z_][A-Za-z0-9_]* +// This defines the format, but not the length restriction, which should be +// specified at the definition of any field of this type. +// +// DNS_LABEL: This is a string, no more than 63 characters long, that conforms +// to the definition of a "label" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])? +// +// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms +// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// or more simply: +// DNS_LABEL(\.DNS_LABEL)* +// +// IANA_SVC_NAME: This is a string, no more than 15 characters long, that +// conforms to the definition of IANA service name in RFC 6335. +// It must contain at least one letter [a-z] and it must contain only [a-z0-9-]. +// Hyphens ('-') cannot be the leading or trailing character of the string +// and cannot be adjacent to other hyphens. + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +// +k8s:openapi-gen=false +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` + + // Namespace defines the space within which each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation.
+ // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. 
In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + Initializers *metav1.Initializers `json:"initializers,omitempty" patchStrategy:"merge" protobuf:"bytes,16,rep,name=initializers"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. 
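// Editor's illustrative sketch (not part of the vendored upstream file): the
// commonly user-set ObjectMeta fields described above. System-populated fields
// (UID, resourceVersion, the timestamps, and so on) are left at their zero
// values. The name, namespace, label, and annotation values are made up.
var exampleObjectMeta = ObjectMeta{
	Name:      "web-frontend",
	Namespace: NamespaceDefault,
	Labels:    map[string]string{"app": "web"},
	Annotations: map[string]string{
		"example.com/owner": "team-frontend",
	},
}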
+ // +optional + // +patchStrategy=merge + Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" +) + +// Volume represents a named volume in a pod that may be accessed by any container in the pod. +type Volume struct { + // Volume's name. + // Must be a DNS_LABEL and unique within the pod. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // VolumeSource represents the location and type of the mounted volume. + // If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"` +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` + // GitRepo represents a git repository at a particular revision. + // +optional + GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` + // Secret represents a secret that should populate this volume. 
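// Editor's illustrative sketch (not part of the vendored upstream file): a Volume
// as defined above, with the inline VolumeSource embedding set to exactly one
// member, here a reference to an existing PersistentVolumeClaim. The volume and
// claim names are made up.
var exampleDataVolume = Volume{
	Name: "data",
	VolumeSource: VolumeSource{
		PersistentVolumeClaim: &PersistentVolumeClaimVolumeSource{
			ClaimName: "data-claim",
			ReadOnly:  false,
		},
	},
}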
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` + // NFS represents an NFS mount on the host that shares a pod's lifetime + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // +optional + ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` + // DownwardAPI represents downward API about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"` + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
+ // +optional + AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"` + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"` + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` + // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // +optional + StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` +} + +// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. +// This volume finds the bound PV and mounts that volume for the pod. A +// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another +// type of volume that is owned by someone else (the system). +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"` + // Will force the ReadOnly setting in VolumeMounts. + // Default false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` +} + +// PersistentVolumeSource is similar to VolumeSource but meant for the +// administrator who creates PVs. Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. 
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // +optional + HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` + // Glusterfs represents a Glusterfs volume that is attached to a host and + // exposed to the pod. Provisioned by an admin. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` + // NFS represents an NFS mount on the host. Provisioned by an admin. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // +optional + ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` + // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
+ // +optional + AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"` + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"` + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"` + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` + // Local represents directly-attached storage with node affinity + // +optional + Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // +optional + StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` +} + +const ( + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. + // It's currently still used and will be held for backwards compatibility + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" + + // MountOptionAnnotation defines mount option annotation used in PVs + MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" + + // AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume. + // Value is a string of the json representation of type NodeAffinity + AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity" +) + +// +genclient=true +// +nonNamespaced=true + +// PersistentVolume (PV) is a storage resource provisioned by an administrator. +// It is analogous to a node. +// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes +type PersistentVolume struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines a specification of a persistent volume owned by the cluster. + // Provisioned by an administrator. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes + // +optional + Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information/status for the persistent volume. + // Populated by the system. 
+ // Read-only. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes + // +optional + Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PersistentVolumeSpec is the specification of a persistent volume. +type PersistentVolumeSpec struct { + // A description of the persistent volume's resources and capacity. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` + // The actual volume backing the persistent volume. + PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"` + // AccessModes contains all ways the volume can be mounted. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // Expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding + // +optional + ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` + // What happens to a persistent volume when released from its claim. + // Valid options are Retain (default) and Recycle. + // Recycling must be supported by the volume plugin underlying this persistent volume. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"` +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. +type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +// PersistentVolumeStatus is the current status of a persistent volume. +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim. 
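// Editor's illustrative sketch (not part of the vendored upstream file): a
// PersistentVolumeSpec as described above, combining capacity, access modes, a
// reclaim policy, and exactly one backing source (NFS here). ResourceList and
// ResourceStorage are defined later in this package; the server, path, and
// storage class values are made up.
var examplePVSpec = PersistentVolumeSpec{
	Capacity: ResourceList{
		ResourceStorage: resource.MustParse("10Gi"),
	},
	AccessModes: []PersistentVolumeAccessMode{ReadWriteMany},
	PersistentVolumeSource: PersistentVolumeSource{
		NFS: &NFSVolumeSource{Server: "nfs.example.com", Path: "/exports/data"},
	},
	PersistentVolumeReclaimPolicy: PersistentVolumeReclaimRetain,
	StorageClassName:              "standard",
}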
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase + // +optional + Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"` + // A human-readable message indicating details about why the volume is in this state. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // Reason is a brief CamelCase string that describes any failure and is meant + // for machine parsing and tidy display in the CLI. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` +} + +// PersistentVolumeList is a list of PersistentVolume items. +type PersistentVolumeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of persistent volumes. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired characteristics of a volume requested by a pod author. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information/status of a persistent volume claim. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. +type PersistentVolumeClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // A list of persistent volume claims. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // AccessModes contains the desired access modes the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // A label query over volumes to consider for binding. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + // Resources represents the minimum resources the volume should have. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` + // VolumeName is the binding reference to the PersistentVolume backing this claim. + // +optional + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + // +optional + StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` +} + +// PersistentVolumeClaimStatus is the current status of a persistent volume claim. +type PersistentVolumeClaimStatus struct { + // Phase represents the current phase of PersistentVolumeClaim. + // +optional + Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"` + // AccessModes contains the actual access modes the volume backing the PVC has. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // Represents the actual resources of the underlying volume. + // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` +} + +type PersistentVolumeAccessMode string + +const ( + // can be mounted read/write mode to exactly 1 host + ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" + // can be mounted in read-only mode to many hosts + ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" + // can be mounted in read/write mode to many hosts + ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" +) + +type PersistentVolumePhase string + +const ( + // used for PersistentVolumes that are not available + VolumePending PersistentVolumePhase = "Pending" + // used for PersistentVolumes that are not yet bound + // Available volumes are held by the binder and matched to PersistentVolumeClaims + VolumeAvailable PersistentVolumePhase = "Available" + // used for PersistentVolumes that are bound + VolumeBound PersistentVolumePhase = "Bound" + // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted + // released volumes must be recycled before becoming available again + // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource + VolumeReleased PersistentVolumePhase = "Released" + // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim + VolumeFailed PersistentVolumePhase = "Failed" +) + +type PersistentVolumeClaimPhase string + +const ( + // used for PersistentVolumeClaims that are not yet bound + ClaimPending PersistentVolumeClaimPhase = "Pending" + // used for PersistentVolumeClaims that are bound + ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. 
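// Editor's illustrative sketch (not part of the vendored upstream file): a
// PersistentVolumeClaimSpec as described above, requesting 8Gi of storage with a
// single-writer access mode. ResourceRequirements, ResourceList, and
// ResourceStorage are defined later in this package; the storage class name is
// made up.
var exampleStorageClassName = "standard"

var examplePVCSpec = PersistentVolumeClaimSpec{
	AccessModes: []PersistentVolumeAccessMode{ReadWriteOnce},
	Resources: ResourceRequirements{
		Requests: ResourceList{
			ResourceStorage: resource.MustParse("8Gi"),
		},
	},
	StorageClassName: &exampleStorageClassName,
}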
+type HostPathVolumeSource struct { + // Path of the directory on the host. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // What type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // Must be an empty string (default) or Memory. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"` + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + SizeLimit resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` + + // Path is the Glusterfs volume path. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // A collection of Ceph monitors. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // The rados image name. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // The rados pool name. + // Default is rbd. 
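// Editor's illustrative sketch (not part of the vendored upstream file): the two
// simplest sources defined above, a host path and a memory-backed EmptyDir whose
// usage is capped via SizeLimit. StorageMediumMemory is defined further down in
// this file; the path and size values are made up.
var exampleHostPath = HostPathVolumeSource{Path: "/var/log/app"}

var exampleMemoryEmptyDir = EmptyDirVolumeSource{
	Medium:    StorageMediumMemory,
	SizeLimit: resource.MustParse("256Mi"),
}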
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` + // The rados user name. + // Default is admin. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` +} + +// Represents a cinder volume resource in Openstack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +type CinderVolumeSource struct { + // volume id used to identify the volume in cinder + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. 
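// Editor's illustrative sketch (not part of the vendored upstream file): an
// RBDVolumeSource as described above, authenticating via a secret reference
// rather than a keyring file. LocalObjectReference is defined later in this
// package; the monitor addresses, image, pool, and secret name are made up.
var exampleRBD = RBDVolumeSource{
	CephMonitors: []string{"ceph-mon-1:6789", "ceph-mon-2:6789"},
	RBDImage:     "app-data",
	RBDPool:      "kube",
	RadosUser:    "admin",
	SecretRef:    &LocalObjectReference{Name: "ceph-secret"},
	FSType:       "ext4",
	ReadOnly:     true,
}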
+type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` + // Optional: User is the rados user name, default is admin + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. +type FlockerVolumeSource struct { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"` + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"` +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource in GCE. Used to identify the disk in GCE. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. 
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +type QuobyteVolumeSource struct { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"` + + // Volume is a string that references an already created Quobyte volume by name. + Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"` + + // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + + // User to map volume access to + // Defaults to serivceaccount user + // +optional + User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"` + + // Group to map volume access to + // Default is no group + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +type FlexVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // Optional: Extra command options if any. 
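// Editor's illustrative sketch (not part of the vendored upstream file): a
// GCEPersistentDiskVolumeSource as described above, mounting the first partition
// of an existing PD read-only. The disk name is made up.
var exampleGCEPD = GCEPersistentDiskVolumeSource{
	PDName:    "my-data-disk",
	FSType:    "ext4",
	Partition: 1,
	ReadOnly:  true,
}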
+ // +optional + Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` +} + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // +optional + Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` + // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". + // If omitted, the default is "false". + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +type GitRepoVolumeSource struct { + // Repository URL + Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"` + // Commit hash for the specified revision. + // +optional + Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"` + // Target directory name. + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"` +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. 
If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` + // Specify whether the Secret or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +const ( + SecretVolumeSourceDefaultMode int32 = 0644 +) + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + Server string `json:"server" protobuf:"bytes,1,opt,name=server"` + + // Path that is exported by the NFS server. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force + // the NFS export to be mounted with read-only permissions. + // Defaults to false. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIVolumeSource struct { + // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` + // Target iSCSI Qualified Name. 
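// Editor's illustrative sketch (not part of the vendored upstream file): a
// SecretVolumeSource as described above, projecting a single key of the secret to
// a chosen relative path and tightening the default file mode to 0400. KeyToPath
// is defined later in this package; the secret and key names are made up.
var exampleSecretMode int32 = 0400

var exampleSecretVolume = SecretVolumeSource{
	SecretName: "tls-certs",
	Items: []KeyToPath{
		{Key: "tls.crt", Path: "certs/tls.crt"},
	},
	DefaultMode: &exampleSecretMode,
}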
+ IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` + // iSCSI target lun number. + Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` + // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` + // whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"` + // whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` + // CHAP secret for iSCSI target and initiator authentication + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +type FCVolumeSource struct { + // Required: FC target worldwide names (WWNs) + TargetWWNs []string `json:"targetWWNs" protobuf:"bytes,1,rep,name=targetWWNs"` + // Required: FC target lun number + Lun *int32 `json:"lun" protobuf:"varint,2,opt,name=lun"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +type AzureFileVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` + // Share Name + ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a vSphere volume resource. 
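// Editor's illustrative sketch (not part of the vendored upstream file): an
// AzureFileVolumeSource as described above, mounting a share read-only with
// credentials taken from a named secret. Both names are made up.
var exampleAzureFile = AzureFileVolumeSource{
	SecretName: "azure-storage-account",
	ShareName:  "shared-data",
	ReadOnly:   true,
}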
+type VsphereVirtualDiskVolumeSource struct {
+ // Path that identifies vSphere volume vmdk
+ VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+ // Storage Policy Based Management (SPBM) profile name.
+ // +optional
+ StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
+ // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+ // +optional
+ StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
+}
+
+// Represents a Photon Controller persistent disk resource.
+type PhotonPersistentDiskVolumeSource struct {
+ // ID that identifies Photon Controller persistent disk
+ PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+}
+
+type AzureDataDiskCachingMode string
+type AzureDataDiskKind string
+
+const (
+ AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
+ AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
+ AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
+
+ AzureSharedBlobDisk AzureDataDiskKind = "Shared"
+ AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
+ AzureManagedDisk AzureDataDiskKind = "Managed"
+)
+
+// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+type AzureDiskVolumeSource struct {
+ // The Name of the data disk in the blob storage
+ DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
+ // The URI of the data disk in the blob storage
+ DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
+ // Host Caching mode: None, Read Only, Read Write.
+ // +optional
+ CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
+ // Expected values: Shared (multiple blob disks per storage account), Dedicated (single blob disk per storage account), Managed (Azure managed data disk, only in managed availability set). Defaults to Shared.
+ Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
+}
+
+// PortworxVolumeSource represents a Portworx volume resource.
+type PortworxVolumeSource struct {
+ // VolumeID uniquely identifies a Portworx volume
+ VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+ // FSType represents the filesystem type to mount
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs".
Implicitly inferred to be "ext4" if unspecified. + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +type ScaleIOVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` + // The name of the storage system as configured in ScaleIO. + System string `json:"system" protobuf:"bytes,2,opt,name=system"` + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` +} + +// Represents a StorageOS persistent volume resource. +type StorageOSVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"` + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
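Several AzureDiskVolumeSource fields are pointers, so optional values are set by taking the address of locals. A hedged sketch, assuming the same package as these types; the disk name and URI are placeholders:

```go
// Illustrative sketch only: a read-only managed data disk with ReadOnly caching.
func exampleAzureDisk() AzureDiskVolumeSource {
	caching := AzureDataDiskCachingReadOnly
	kind := AzureManagedDisk
	fsType := "ext4"
	readOnly := true

	return AzureDiskVolumeSource{
		DiskName:    "example-disk",                                                // hypothetical disk name
		DataDiskURI: "https://example.blob.core.windows.net/vhds/example-disk.vhd", // hypothetical URI
		CachingMode: &caching,
		FSType:      &fsType,
		ReadOnly:    &readOnly,
		Kind:        &kind,
	}
}
```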
+ // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` +} + +// Represents a StorageOS persistent volume resource. +type StorageOSPersistentVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"` + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. 
+ // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +const ( + ConfigMapVolumeSourceDefaultMode int32 = 0644 +) + +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"` + // information about the configMap data to project + ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"` +} + +const ( + ProjectedVolumeSourceDefaultMode int32 = 0644 +) + +// Maps a string key to a path within a volume. +type KeyToPath struct { + // The key to project. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. 
+ Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"` +} + +// Local represents directly-attached storage with node affinity +type LocalVolumeSource struct { + // The full path to the volume on the node + // For alpha, this path must be a directory + // Once block as a source is supported, then this path can point to a block device + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` +} + +// ContainerPort represents a network port in a single container. +type ContainerPort struct { + // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + // named port in a pod must have a unique name. Name for the port that can be + // referred to by services. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Number of port to expose on the host. + // If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // Most containers do not need this. + // +optional + HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"` + // Number of port to expose on the pod's IP address. + // This must be a valid port number, 0 < x < 65536. + ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"` + // Protocol for port. Must be UDP or TCP. + // Defaults to "TCP". + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` + // What host IP to bind the external port to. + // +optional + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` +} + +// VolumeMount describes a mounting of a Volume within a container. +type VolumeMount struct { + // This must match the Name of a Volume. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Mounted read-only if true, read-write otherwise (false or unspecified). + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` + // Path within the container at which the volume should be mounted. Must + // not contain ':'. + MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"` + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` +} + +// EnvVar represents an environment variable present in a Container. +type EnvVar struct { + // Name of the environment variable. Must be a C_IDENTIFIER. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Optional: no more than one of the following may be specified. + + // Variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // Defaults to "". 
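A small sketch of how ContainerPort and VolumeMount are typically populated (names, paths, and the port number are invented; the Protocol string type is declared elsewhere in this file):

```go
// Illustrative sketch only: one named TCP port and one read-only mount.
func examplePortsAndMounts() ([]ContainerPort, []VolumeMount) {
	ports := []ContainerPort{
		{Name: "http", ContainerPort: 8080, Protocol: "TCP"},
	}
	mounts := []VolumeMount{
		{Name: "config", MountPath: "/etc/app", ReadOnly: true, SubPath: "conf.d"},
	}
	return ports, mounts
}
```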
+ // +optional
+ Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+ // Source for the environment variable's value. Cannot be used if value is not empty.
+ // +optional
+ ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
+}
+
+// EnvVarSource represents a source for the value of an EnvVar.
+type EnvVarSource struct {
+ // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
+ // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
+ // +optional
+ FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ // +optional
+ ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
+ // Selects a key of a ConfigMap.
+ // +optional
+ ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
+ // Selects a key of a secret in the pod's namespace
+ // +optional
+ SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
+}
+
+// ObjectFieldSelector selects an APIVersioned field of an object.
+type ObjectFieldSelector struct {
+ // Version of the schema the FieldPath is written in terms of, defaults to "v1".
+ // +optional
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
+ // Path of the field to select in the specified API version.
+ FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+type ResourceFieldSelector struct {
+ // Container name: required for volumes, optional for env vars
+ // +optional
+ ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
+ // Required: resource to select
+ Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
+ // Specifies the output format of the exposed resources, defaults to "1"
+ // +optional
+ Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
+}
+
+// Selects a key from a ConfigMap.
+type ConfigMapKeySelector struct {
+ // The ConfigMap to select from.
+ LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+ // The key to select.
+ Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
+ // Specify whether the ConfigMap or its key must be defined
+ // +optional
+ Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
+}
+
+// SecretKeySelector selects a key of a Secret.
+type SecretKeySelector struct {
+ // The name of the secret in the pod's namespace to select from.
+ LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+ // The key of the secret to select from. Must be a valid secret key.
+ Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
+ // Specify whether the Secret or its key must be defined
+ // +optional
+ Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
+}
+
+// EnvFromSource represents the source of a set of ConfigMaps
+type EnvFromSource struct {
+ // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
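The selectors above are consumed through EnvVar.ValueFrom; a minimal sketch, assuming the same package as these types (the variable names, Secret name, and key are invented; LocalObjectReference is declared elsewhere in this file):

```go
// Illustrative sketch only: a literal value, a downward-API field, and a Secret key.
func exampleEnv() []EnvVar {
	optional := true
	return []EnvVar{
		{Name: "LOG_LEVEL", Value: "debug"},
		{Name: "POD_NAMESPACE", ValueFrom: &EnvVarSource{
			FieldRef: &ObjectFieldSelector{FieldPath: "metadata.namespace"},
		}},
		{Name: "DB_PASSWORD", ValueFrom: &EnvVarSource{
			SecretKeyRef: &SecretKeySelector{
				LocalObjectReference: LocalObjectReference{Name: "db-creds"}, // hypothetical Secret
				Key:                  "password",
				Optional:             &optional,
			},
		}},
	}
}
```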
+ // +optional + Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"` + // The ConfigMap to select from + // +optional + ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"` + // The Secret to select from + // +optional + SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // Specify whether the Secret must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The header field value + Value string `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Path to access on the HTTP server. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + // Name or number of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"` + // Host name to connect to, defaults to the pod IP. You probably want to set + // "Host" in httpHeaders instead. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"` + // Scheme to use for connecting to the host. + // Defaults to HTTP. + // +optional + Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"` + // Custom headers to set in the request. HTTP allows repeated headers. + // +optional + HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"` +} + +// URIScheme identifies the scheme used for connection to a host for Get actions +type URIScheme string + +const ( + // URISchemeHTTP means that the scheme used will be http:// + URISchemeHTTP URIScheme = "HTTP" + // URISchemeHTTPS means that the scheme used will be https:// + URISchemeHTTPS URIScheme = "HTTPS" +) + +// TCPSocketAction describes an action based on opening a socket +type TCPSocketAction struct { + // Number or name of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"` + // Optional: Host name to connect to, defaults to the pod IP. 
+ // +optional + Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` +} + +// ExecAction describes a "run in container" action. +type ExecAction struct { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // The action taken to determine the health of a container + Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` + // Number of seconds after the container has started before liveness probes are initiated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"` + // Number of seconds after which the probe times out. + // Defaults to 1 second. Minimum value is 1. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` + // How often (in seconds) to perform the probe. + // Default to 10 seconds. Minimum value is 1. + // +optional + PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // +optional + SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"` +} + +// PullPolicy describes a policy for if/when to pull a container image +type PullPolicy string + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. + PullAlways PullPolicy = "Always" + // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present + PullNever PullPolicy = "Never" + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. + PullIfNotPresent PullPolicy = "IfNotPresent" +) + +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. 
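The Probe type embeds Handler (defined further down in this file), so exactly one action is set inline. A hedged sketch, assuming the same package as these types and the vendored apimachinery intstr helper; the path, port, and header are invented:

```go
package v1 // assumed: same package as these types

import "k8s.io/apimachinery/pkg/util/intstr"

// Illustrative sketch only: an HTTP probe with typical thresholds.
func exampleProbe() *Probe {
	return &Probe{
		Handler: Handler{
			HTTPGet: &HTTPGetAction{
				Path:   "/healthz",
				Port:   intstr.FromInt(8080),
				Scheme: URISchemeHTTP,
				HTTPHeaders: []HTTPHeader{
					{Name: "X-Probe", Value: "1"}, // hypothetical custom header
				},
			},
		},
		InitialDelaySeconds: 5,
		TimeoutSeconds:      1,
		PeriodSeconds:       10,
		SuccessThreshold:    1,
		FailureThreshold:    3,
	}
}
```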
+ TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. + TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + +// Capability represent POSIX capabilities type +type Capability string + +// Adds and removes POSIX capabilities from running containers. +type Capabilities struct { + // Added capabilities + // +optional + Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"` + // Removed capabilities + // +optional + Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"` +} + +// ResourceRequirements describes the compute resource requirements. +type ResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` +} + +const ( + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault string = "/dev/termination-log" +) + +// A single application container that you want to run within a pod. +type Container struct { + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + Image string `json:"image" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). 
Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. 
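ResourceRequirements (defined above) is filled with quantities parsed by the vendored apimachinery resource package. A sketch under the assumption that it sits in the same package as these types; string keys stand in for the ResourceName constants declared elsewhere in this file:

```go
package v1 // assumed: same package as these types

import "k8s.io/apimachinery/pkg/api/resource"

// Illustrative sketch only: modest requests with higher limits.
func exampleResources() ResourceRequirements {
	return ResourceRequirements{
		Requests: ResourceList{ // ResourceList map type is declared elsewhere in this file
			"cpu":    resource.MustParse("500m"),
			"memory": resource.MustParse("128Mi"),
		},
		Limits: ResourceList{
			"cpu":    resource.MustParse("1"),
			"memory": resource.MustParse("256Mi"),
		},
	}
}
```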
+ // +optional + Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // Security options the pod should run with. + // More info: https://kubernetes.io/docs/concepts/policy/security-context/ + // More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md + // +optional + SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. 
+ // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` + // TCPSocket specifies an action involving a TCP port. + // TCP hooks not yet supported + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +type Lifecycle struct { + // PostStart is called immediately after a container is created. If the handler fails, + // the container is terminated and restarted according to its restart policy. + // Other management of the container blocks until the hook completes. + // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + // +optional + PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` + // PreStop is called immediately before a container is terminated. + // The container is terminated after the handler completes. + // The reason for termination is passed to the handler. + // Regardless of the outcome of the handler, the container is eventually terminated. + // Other management of the container blocks until the hook completes. + // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + // +optional + PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` +} + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// ContainerStateWaiting is a waiting state of a container. +type ContainerStateWaiting struct { + // (brief) reason the container is not yet running. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"` + // Message regarding why the container is not yet running. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} + +// ContainerStateRunning is a running state of a container. +type ContainerStateRunning struct { + // Time at which the container was last (re-)started + // +optional + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"` +} + +// ContainerStateTerminated is a terminated state of a container. 
+type ContainerStateTerminated struct { + // Exit status from the last termination of the container + ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"` + // Signal from the last termination of the container + // +optional + Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"` + // (brief) reason from the last termination of the container + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // Message regarding the last termination of the container + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + // Time at which previous execution of the container started + // +optional + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"` + // Time at which the container last terminated + // +optional + FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"` + // Container's ID in the format 'docker://' + // +optional + ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"` +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // Details about a waiting container + // +optional + Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"` + // Details about a running container + // +optional + Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"` + // Details about a terminated container + // +optional + Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"` +} + +// ContainerStatus contains details for the current status of this container. +type ContainerStatus struct { + // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Details about the container's current condition. + // +optional + State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` + // Details about the container's last termination condition. + // +optional + LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` + // Specifies whether the container has passed its readiness probe. + Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` + // The number of times the container has been restarted, currently based on + // the number of dead containers that have not yet been removed. + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` + // The image the container is running. + // More info: https://kubernetes.io/docs/concepts/containers/images + // TODO(dchen1107): Which image the container is running with? + Image string `json:"image" protobuf:"bytes,6,opt,name=image"` + // ImageID of the container's image. + ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` + // Container's ID in the format 'docker://'. + // +optional + ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` +} + +// PodPhase is a label for the condition of a pod at the current time. 
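Exactly one member of ContainerState is expected to be set, so consumers typically switch on whichever is non-nil. A minimal sketch, assuming the same package as these types:

```go
package v1 // assumed: same package as these types

import "fmt"

// Illustrative sketch only: summarize a ContainerStatus for logging.
func describeContainerState(cs ContainerStatus) string {
	switch {
	case cs.State.Running != nil:
		return fmt.Sprintf("%s running since %s", cs.Name, cs.State.Running.StartedAt)
	case cs.State.Terminated != nil:
		t := cs.State.Terminated
		return fmt.Sprintf("%s terminated: exit=%d reason=%q", cs.Name, t.ExitCode, t.Reason)
	case cs.State.Waiting != nil:
		return fmt.Sprintf("%s waiting: %s", cs.Name, cs.State.Waiting.Reason)
	default:
		return cs.Name + " waiting (no details reported)" // unset state defaults to waiting
	}
}
```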
+type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +// PodConditionType is a valid value for PodCondition.Type +type PodConditionType string + +// These are valid conditions of pod. +const ( + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" + // PodReady means the pod is able to service requests and should be added to the + // load balancing pools of all matching services. + PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" + // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler + // can't schedule the pod right now, for example due to insufficient resources in the cluster. + PodReasonUnschedulable = "Unschedulable" +) + +// PodCondition contains details for the current condition of this pod. +type PodCondition struct { + // Type is the type of the condition. + // Currently only Ready. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"` + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// RestartPolicy describes how the container should be restarted. +// Only one of the following restart policies may be specified. 
+// If none of the following policies is specified, the default one +// is RestartPolicyAlways. +type RestartPolicy string + +const ( + RestartPolicyAlways RestartPolicy = "Always" + RestartPolicyOnFailure RestartPolicy = "OnFailure" + RestartPolicyNever RestartPolicy = "Never" +) + +// DNSPolicy defines how a pod's DNS will be configured. +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + + // DNSClusterFirst indicates that the pod should use cluster DNS + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. + DNSClusterFirst DNSPolicy = "ClusterFirst" + + // DNSDefault indicates that the pod should use the default (as + // determined by kubelet) DNS settings. + DNSDefault DNSPolicy = "Default" + + DefaultTerminationGracePeriodSeconds = 30 +) + +// A node selector represents the union of the results of one or more label queries +// over a set of nodes; that is, it represents the OR of the selectors represented +// by the node selector terms. +type NodeSelector struct { + //Required. A list of node selector terms. The terms are ORed. + NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` +} + +// A null or empty node selector term matches no objects. +type NodeSelectorTerm struct { + //Required. A list of node selector requirements. The requirements are ANDed. + MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"` +} + +// A node selector requirement is a selector that contains values, a key, and an operator +// that relates the key and values. +type NodeSelectorRequirement struct { + // The label key that the selector applies to. + // +patchMergeKey=key + // +patchStrategy=merge + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // Represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"` + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A node selector operator is the set of operators that can be used in +// a node selector requirement. +type NodeSelectorOperator string + +const ( + NodeSelectorOpIn NodeSelectorOperator = "In" + NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" + NodeSelectorOpExists NodeSelectorOperator = "Exists" + NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" + NodeSelectorOpGt NodeSelectorOperator = "Gt" + NodeSelectorOpLt NodeSelectorOperator = "Lt" +) + +// Affinity is a group of affinity scheduling rules. +type Affinity struct { + // Describes node affinity scheduling rules for the pod. 
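Node selector terms are ORed while the requirements inside a single term are ANDed; a minimal sketch, assuming the same package as these types (the label keys and values are invented):

```go
// Illustrative sketch only: require an SSD node that also advertises a GPU label.
func exampleNodeSelector() *NodeSelector {
	return &NodeSelector{
		NodeSelectorTerms: []NodeSelectorTerm{{
			MatchExpressions: []NodeSelectorRequirement{
				{Key: "example.com/disktype", Operator: NodeSelectorOpIn, Values: []string{"ssd"}},
				{Key: "example.com/gpu", Operator: NodeSelectorOpExists},
			},
		}},
	}
}
```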
+ // +optional + NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"` + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"` + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"` +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. 
due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the anti-affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling anti-affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ // +optional
+ PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+type WeightedPodAffinityTerm struct {
+ // weight associated with matching the corresponding podAffinityTerm,
+ // in the range 1-100.
+ Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
+ // Required. A pod affinity term, associated with the corresponding weight.
+ PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key topologyKey matches that of any node on which
+// a pod of the set of pods is running
+type PodAffinityTerm struct {
+ // A label query over a set of resources, in this case pods.
+ // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"` + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + // +optional + TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"` +} + +// Node affinity is a group of node affinity scheduling rules. +type NodeAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // will try to eventually evict the pod from its node. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
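+//
+// Illustrative sketch (hypothetical label values): with two preferred terms of
+// weight 80 and 20, a node matching only the first term scores 80, a node
+// matching both scores 100, and the scheduler prefers the node(s) with the
+// highest sum:
+//
+//   affinity := NodeAffinity{
+//       PreferredDuringSchedulingIgnoredDuringExecution: []PreferredSchedulingTerm{
+//           {Weight: 80, Preference: NodeSelectorTerm{ /* e.g. disktype=ssd */ }},
+//           {Weight: 20, Preference: NodeSelectorTerm{ /* e.g. zone=zone-a */ }},
+//       },
+//   }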
+type PreferredSchedulingTerm struct { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` + // A node selector term, associated with the corresponding weight. + Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"` +} + +// The node this Taint is attached to has the effect "effect" on +// any pod that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + // +patchMergeKey=key + // +patchStrategy=merge + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // Required. The taint value corresponding to the taint key. + // +optional + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"` + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + TimeAdded metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` +} + +type TaintEffect string + +const ( + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // but allow all pods submitted to Kubelet without going through the scheduler + // to start, and allow all already-running pods to continue running. + // Enforced by the scheduler. + TaintEffectNoSchedule TaintEffect = "NoSchedule" + // Like TaintEffectNoSchedule, but the scheduler tries not to schedule + // new pods onto the node, rather than prohibiting new pods from scheduling + // onto the node entirely. Enforced by the scheduler. + TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to + // Kubelet without going through the scheduler to start. + // Enforced by Kubelet and the scheduler. + // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" + // Evict any already-running pods that do not tolerate the taint. + // Currently enforced by NodeController. + TaintEffectNoExecute TaintEffect = "NoExecute" +) + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple <key,value,effect> using the matching operator <operator>. +type Toleration struct { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. + // +optional + // +patchMergeKey=key + // +patchStrategy=merge + Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + // +optional + Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"` + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string.
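+ // Illustrative sketch (hypothetical values): a toleration with
+ //   Key: "dedicated", Operator: TolerationOpEqual, Value: "gpu", Effect: TaintEffectNoSchedule
+ // tolerates a taint with Key "dedicated", Value "gpu" and Effect NoSchedule,
+ // while Operator: TolerationOpExists with the same key tolerates any value of that key.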
+ // +optional + Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"` + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"` + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"` +} + +// A toleration operator is the set of operators that can be used in a toleration. +type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + +const ( + // This annotation key will be used to contain an array of v1 JSON encoded Containers + // for init containers. The annotation will be placed into the internal type and cleared. + // This key is only recognized by version >= 1.4. + PodInitContainersBetaAnnotationKey = "pod.beta.kubernetes.io/init-containers" + // This annotation key will be used to contain an array of v1 JSON encoded Containers + // for init containers. The annotation will be placed into the internal type and cleared. + // This key is recognized by version >= 1.3. For version 1.4 code, this key + // will have its value copied to the beta key. + PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers" + // This annotation key will be used to contain an array of v1 JSON encoded + // ContainerStatuses for init containers. The annotation will be placed into the internal + // type and cleared. This key is only recognized by version >= 1.4. + PodInitContainerStatusesBetaAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses" + // This annotation key will be used to contain an array of v1 JSON encoded + // ContainerStatuses for init containers. The annotation will be placed into the internal + // type and cleared. This key is recognized by version >= 1.3. For version 1.4 code, + // this key will have its value copied to the beta key. + PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses" +) + +// PodSpec is a description of a pod. +type PodSpec struct { + // List of volumes that can be mounted by containers belonging to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. 
+ // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +patchMergeKey=name + // +patchStrategy=merge + InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` + // List of containers belonging to the pod. + // Containers cannot currently be added or removed. + // There must be at least one container in a Pod. + // Cannot be updated. + // +patchMergeKey=name + // +patchStrategy=merge + Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` + // Restart policy for all containers within the pod. + // One of Always, OnFailure, Never. + // Default to Always. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + // +optional + RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be a non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` + // Optional duration in seconds the pod may be active on the node relative to + // StartTime before the system will actively try to mark it failed and kill associated containers. + // Value must be a positive integer. + // +optional + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` + // Set DNS policy for containers within the pod. + // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Defaults to "ClusterFirst". + // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // +optional + DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` + // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + // Deprecated: Use serviceAccountName instead. + // +k8s:conversion-gen=false + // +optional + DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"` + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` + // Host networking requested for this pod. Use the host's network namespace. + // If this option is set, the ports that will be used must be specified. + // Default to false. + // +k8s:conversion-gen=false + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` + // Specifies the hostname of the Pod + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` + // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + // If not specified, the pod will not have a domain name at all. + // +optional + Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` + // If specified, the pod will be dispatched by the specified scheduler. + // If not specified, the pod will be dispatched by the default scheduler.
+ // +optional + SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` + // If specified, the pod's tolerations. + // +optional + Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. + // +optional + // +patchMergeKey=IP + // +patchStrategy=merge + HostAliases []HostAlias `json:"hostMappings,omitempty" patchStrategy:"merge" patchMergeKey:"IP" protobuf:"bytes,23,rep,name=hostMappings"` +} + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +type HostAlias struct { + // IP address of the host file entry. + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // Hostnames for the above IP address. + Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"` +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +type PodSecurityContext struct { + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsUser *types.UnixUserID `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser,casttype=k8s.io/apimachinery/pkg/types.UnixUserID"` + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + SupplementalGroups []types.UnixGroupID `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups,casttype=k8s.io/apimachinery/pkg/types.UnixGroupID"` + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. 
The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + FSGroup *types.UnixGroupID `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup,casttype=k8s.io/apimachinery/pkg/types.UnixGroupID"` +} + +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. +type PodStatus struct { + // Current condition of the pod. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase + // +optional + Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"` + // Current service state of pod. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` + // A human readable message indicating details about why the pod is in this condition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // A brief CamelCase message indicating details about why the pod is in this state. + // e.g. 'OutOfDisk' + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + + // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // +optional + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` + // IP address allocated to the pod. Routable at least within the cluster. + // Empty if not yet allocated. + // +optional + PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` + + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` + + // The list has one entry per container in the manifest. Each entry is currently the output + // of `docker inspect`. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // +optional + ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements + // See PodQOSClass type for available QOS classes + // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // +optional + QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +type PodStatusResult struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// +genclient=true + +// Pod is a collection of containers that can run on a host. This resource is created +// by clients and scheduled onto hosts. +type Pod struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the pod. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodList is a list of Pods. +type PodList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pods. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md + Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PodTemplateSpec describes the data a pod should have when created from a template +type PodTemplateSpec struct { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the pod. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +genclient=true + +// PodTemplate describes a template for creating copies of a predefined pod. 
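+//
+// Illustrative sketch (hypothetical names and image): a minimal template wraps
+// object metadata and a PodSpec with a single container:
+//
+//   tmpl := PodTemplate{
+//       ObjectMeta: metav1.ObjectMeta{Name: "example"},
+//       Template: PodTemplateSpec{
+//           Spec: PodSpec{
+//               Containers: []Container{{Name: "app", Image: "nginx:1.13"}},
+//           },
+//       },
+//   }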
+type PodTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Template defines the pods that will be created from this pod template. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// PodTemplateList is a list of PodTemplates. +type PodTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pod templates + Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicationControllerSpec is the specification of a replication controller. +type ReplicationControllerSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the Replicas count. + // If Selector is empty, it is defaulted to the labels present on the Pod template. + // Label keys and values that must match in order to be controlled by this replication + // controller, if empty defaulted to labels on Pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // TemplateRef is a reference to an object that describes the pod that will be created if + // insufficient replicas are detected. + // Reference to an object that describes the pod that will be created if insufficient replicas are detected. + // +optional + // TemplateRef *ObjectReference `json:"templateRef,omitempty"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. This takes precedence over a TemplateRef. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +type ReplicationControllerStatus struct { + // Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replication controller. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replication controller. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed replication controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replication controller's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicationControllerConditionType string + +// These are valid conditions of a replication controller. +const ( + // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods + // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, + // etc. or deleted due to kubelet being down or finalizers are failing. + ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" +) + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +type ReplicationControllerCondition struct { + // Type of replication controller condition. + Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient=true + +// ReplicationController represents the configuration of a replication controller. +type ReplicationController struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicationController are empty, they are defaulted to + // be the same as the Pod(s) that the replication controller manages. + // Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the replication controller. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the replication controller. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ReplicationControllerList is a collection of replication controllers. +type ReplicationControllerList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of replication controllers. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. + ServiceAffinityNone ServiceAffinity = "None" +) + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the cluster IP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// Service External Traffic Policy Type string +type ServiceExternalTrafficPolicyType string + +const ( + // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. + ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + // ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior. + ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" +) + +// ServiceStatus represents the current status of a service. +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// LoadBalancerStatus represents the status of a load-balancer. 
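+//
+// Illustrative sketch (hypothetical address): a cloud load-balancer exposing the
+// service at 203.0.113.10 would be reported as
+//
+//   status := LoadBalancerStatus{
+//       Ingress: []LoadBalancerIngress{{IP: "203.0.113.10"}},
+//   }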
+type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer. + // Traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` +} + +// ServiceSpec describes the attributes that a user creates on a service. +type ServiceSpec struct { + // The list of ports that are exposed by this service. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +patchMergeKey=port + // +patchStrategy=merge + Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // clusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. "None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +optional + ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` + + // type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. 
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types + // +optional + Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` + + // externalIPs is a list of IP addresses for which nodes in the cluster + // will also accept traffic for this service. These IPs are not managed by + // Kubernetes. The user is responsible for ensuring that traffic arrives + // at a node with this IP. A common example is external load-balancers + // that are not part of the Kubernetes system. + // +optional + ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` + + // Supports "ClientIP" and "None". Used to maintain session affinity. + // Enable client IP based session affinity. + // Must be ClientIP or None. + // Defaults to None. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +optional + SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"` + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. + // +optional + LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` + + // If specified and supported by the platform, traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature. + // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ + // +optional + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` + + // externalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid DNS name and requires Type to be ExternalName. + // +optional + ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` + + // externalTrafficPolicy denotes if this Service desires to route external + // traffic to node-local or cluster-wide endpoints. "Local" preserves the + // client source IP and avoids a second hop for LoadBalancer and Nodeport + // type services, but risks potentially imbalanced traffic spreading. + // "Cluster" obscures the client source IP and may cause a second hop to + // another node, but should have good overall load-spreading. + // +optional + ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` + + // healthCheckNodePort specifies the healthcheck nodePort for the service. + // If not specified, HealthCheckNodePort is created by the service api + // backend with the allocated nodePort. Will use user-specified nodePort value + // if specified by the client. Only takes effect when Type is set to LoadBalancer + // and ExternalTrafficPolicy is set to Local.
+ // +optional + HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"` +} + +// ServicePort contains information on service's port. +type ServicePort struct { + // The name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. + // Optional if only one ServicePort is defined on this service. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // The IP protocol for this port. Supports "TCP" and "UDP". + // Default is TCP. + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + + // The port that will be exposed by this service. + Port int32 `json:"port" protobuf:"varint,3,opt,name=port"` + + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + // +optional + TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` + + // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. + // Usually assigned by the system. If specified, it will be allocated to the service + // if unused or else creation of the service will fail. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + // +optional + NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` +} + +// +genclient=true + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of a service. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the service. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// ServiceList holds a list of services. +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of services + Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + // +optional + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"` +} + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ServiceAccounts. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The set of all endpoints is the union of all subsets. Addresses are placed into + // subsets according to the IPs they share. 
A single address with multiple ports, + // some of which are ready and some of which are not (because they come from + // different containers) will result in the address being displayed in different + // subsets for the different ports. No address will appear in both Addresses and + // NotReadyAddresses in the same subset. + // Sets of addresses and ports that comprise a service. + Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"` +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +type EndpointSubset struct { + // IP addresses which offer the related ports that are marked as ready. These endpoints + // should be considered safe for load balancers and clients to utilize. + // +optional + Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` + // IP addresses which offer the related ports but are not currently marked as ready + // because they have not yet finished starting, have recently failed a readiness check, + // or have recently failed a liveness check. + // +optional + NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"` + // Port numbers available on the related IP addresses. + // +optional + Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` +} + +// EndpointAddress is a tuple that describes a single IP address. +type EndpointAddress struct { + // The IP of this endpoint. + // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + // or link-local multicast (224.0.0.0/24). + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, See #4447. + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` + // The Hostname of this endpoint + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. + // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"` + // Reference to object providing the endpoint. + // +optional + TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"` +} + +// EndpointPort is a tuple that describes a single port. +type EndpointPort struct { + // The name of this port (corresponds to ServicePort.Name). + // Must be a DNS_LABEL. + // Optional only if one port is defined. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // The port number of the endpoint. + Port int32 `json:"port" protobuf:"varint,2,opt,name=port"` + + // The IP protocol for this port. + // Must be UDP or TCP. + // Default is TCP. + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` +} + +// EndpointsList is a list of endpoints. +type EndpointsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of endpoints. + Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// NodeSpec describes the attributes that a node is created with. +type NodeSpec struct { + // PodCIDR represents the pod IP range assigned to the node. + // +optional + PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` + // External ID of the node assigned by some machine database (e.g. a cloud provider). + // Deprecated. + // +optional + ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` + // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID> + // +optional + ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` + // Unschedulable controls node schedulability of new pods. By default, node is schedulable. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration + // +optional + Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` + // If specified, the node's taints. + // +optional + Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercased for backwards compat (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"` +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"` + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"` + // Boot ID reported by the node. + BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"` + // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). + KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"` + // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). + OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"` + // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). + ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` + // Kubelet Version reported by the node.
+ KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` + // KubeProxy Version reported by the node. + KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` + // The Operating System reported by the node + OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` + // The Architecture reported by the node + Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"` +} + +// NodeStatus is information about the current status of a node. +type NodeStatus struct { + // Capacity represents the total resources of a node. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` + // Allocatable represents the resources of a node that are available for scheduling. + // Defaults to Capacity. + // +optional + Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"` + // NodePhase is the recently observed lifecycle phase of the node. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase + // The field is never populated, and now is deprecated. + // +optional + Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` + // Conditions is an array of current observed node conditions. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` + // List of addresses reachable to the node. + // Queried from cloud provider, if available. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` + // Set of ids/uuids to uniquely identify the node. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#info + // +optional + NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` + // List of container images on this node + // +optional + Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"` + // List of attachable volumes in use (mounted) by the node. + // +optional + VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"` + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"` +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"` + + // DevicePath represents the device path where the volume should be available + DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"` +} + +// AvoidPods describes pods that should avoid this node. 
This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"` +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"` + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"` + // (brief) reason why this entry was added to the list. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // Human readable message indicating why this entry was added to the list. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"` +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` + // The size of the image in bytes. + // +optional + SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"` +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable. +const ( + // NodeReady means kubelet is healthy and ready to accept pods. + NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" + // NodeInodePressure means the kubelet is under pressure due to insufficient available inodes. 
+ NodeInodePressure NodeConditionType = "InodePressure" +) + +// NodeCondition contains condition information for a node. +type NodeCondition struct { + // Type of node condition. + Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time we got an update on a given condition. + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"` + // Last time the condition transit from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +type NodeAddressType string + +// These are valid address type of node. +const ( + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +// NodeAddress contains information for the node's address. +type NodeAddress struct { + // Node address type, one of Hostname, ExternalIP or InternalIP. + Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"` + // The node address. + Address string `json:"address" protobuf:"bytes,2,opt,name=address"` +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. +const ( + // CPU, in cores. (500m = .5 cores) + ResourceCPU ResourceName = "cpu" + // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceMemory ResourceName = "memory" + // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) + ResourceStorage ResourceName = "storage" + // Local Storage for container overlay filesystem, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // The resource name for ResourceStorageOverlay is alpha and it can change across releases. + ResourceStorageOverlay ResourceName = "storage.kubernetes.io/overlay" + // Local Storage for scratch space, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // The resource name for ResourceStorageScratch is alpha and it can change across releases. + ResourceStorageScratch ResourceName = "storage.kubernetes.io/scratch" + // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. 
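A common consumer pattern for the NodeCondition list above is to look up the Ready condition. The helper below is hypothetical, not an API of this package, and assumes the vendored k8s.io/client-go/pkg/api/v1 import path.

```go
package main

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1"
)

// isNodeReady scans status.conditions for the "Ready" condition and reports
// whether it is currently "True". A missing or "Unknown" condition counts as
// not ready.
func isNodeReady(status v1.NodeStatus) bool {
	for _, cond := range status.Conditions {
		if cond.Type == v1.NodeReady {
			return cond.Status == v1.ConditionTrue
		}
	}
	return false
}

func main() {
	status := v1.NodeStatus{
		Conditions: []v1.NodeCondition{
			{Type: v1.NodeMemoryPressure, Status: v1.ConditionFalse},
			{Type: v1.NodeReady, Status: v1.ConditionTrue, Reason: "KubeletReady"},
		},
	}
	fmt.Println("ready:", isNodeReady(status)) // ready: true
}
```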
+ ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" + // Number of Pods that may be running on this Node: see ResourcePods +) + +const ( + // Namespace prefix for opaque counted resources (alpha). + ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" +) + +// ResourceList is a set of (resource name, quantity) pairs. +type ResourceList map[ResourceName]resource.Quantity + +// +genclient=true +// +nonNamespaced=true + +// Node is a worker node in Kubernetes. +// Each node will have a unique identifier in the cache (i.e. in etcd). +type Node struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of a node. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the node. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// NodeList is the whole list of all Nodes which have been registered with master. +type NodeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of nodes + Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FinalizerName is the name identifying a finalizer during namespace lifecycle. +type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. +const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceSpec describes the attributes on a Namespace. +type NamespaceSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. + // More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers + // +optional + Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` +} + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases + // +optional + Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +// +genclient=true +// +nonNamespaced=true + +// Namespace provides a scope for Names. +// Use of multiple namespaces is optional. +type Namespace struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
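The quantity conventions noted in the resource-name comments above ("500m = .5 cores", binary suffixes for bytes) map onto resource.Quantity values stored in a ResourceList. A small sketch with made-up capacities, assuming the vendored k8s.io/apimachinery and k8s.io/client-go packages added by this change.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	// Half a core and 2GiB of memory for a hypothetical node.
	capacity := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("2Gi"),
	}

	// Copy the quantities out of the map before calling their pointer-receiver
	// accessors.
	cpu := capacity[v1.ResourceCPU]
	mem := capacity[v1.ResourceMemory]
	fmt.Println("cpu millicores:", cpu.MilliValue()) // 500
	fmt.Println("memory bytes:", mem.Value())        // 2147483648
}
```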
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of the Namespace. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status describes the current status of a Namespace. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Namespace objects in the list. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. +type Binding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The target object that you want to bind to the standard object. + Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +// +k8s:openapi-gen=false +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// DeletionPropagation decides if a deletion will propagate to the dependents of the object, and how the garbage collector will handle the propagation. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. + // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. + // This policy is cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false +type DeleteOptions struct { + metav1.TypeMeta `json:",inline"` + + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. 
+ // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"` +} + +// ListOptions is the query options to a standard REST list call. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false +type ListOptions struct { + metav1.TypeMeta `json:",inline"` + + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"` + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` + // Timeout for the list/watch call. + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` +} + +// PodLogOptions is the query options for a Pod's logs REST call. +type PodLogOptions struct { + metav1.TypeMeta `json:",inline"` + + // The container for which to stream logs. Defaults to only container if there is one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` + // Follow the log stream of the pod. Defaults to false. 
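Tying the deletion types above together: GracePeriodSeconds and PropagationPolicy are pointers so that "unset" can be distinguished from a zero value, and only one of OrphanDependents or PropagationPolicy should be supplied. The source marks this DeleteOptions copy as deprecated in favour of meta/v1, so the sketch below is purely illustrative and uses made-up values.

```go
package main

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	grace := int64(30)
	policy := v1.DeletePropagationForeground

	// A 30s grace period plus foreground cascading deletion; OrphanDependents
	// is left nil because it is mutually exclusive with PropagationPolicy.
	opts := v1.DeleteOptions{
		GracePeriodSeconds: &grace,
		PropagationPolicy:  &policy,
	}

	fmt.Printf("grace=%ds policy=%s\n", *opts.GracePeriodSeconds, *opts.PropagationPolicy)
}
```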
+ // +optional + Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` + // Return previous terminated container logs. Defaults to false. + // +optional + Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"` + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + // +optional + Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + // +optional + TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + // +optional + LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` +} + +// PodAttachOptions is the query options to a Pod's remote attach call. +// --- +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +type PodAttachOptions struct { + metav1.TypeMeta `json:",inline"` + + // Stdin if true, redirects the standard input stream of the pod for this call. + // Defaults to false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` + + // Stdout if true indicates that stdout is to be redirected for the attach call. + // Defaults to true. + // +optional + Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` + + // Stderr if true indicates that stderr is to be redirected for the attach call. + // Defaults to true. + // +optional + Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` + + // TTY if true indicates that a tty will be allocated for the attach call. + // This is passed through the container runtime so the tty + // is allocated on the worker node by the container runtime. + // Defaults to false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` + + // The container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` +} + +// PodExecOptions is the query options to a Pod's remote exec call. 
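The PodLogOptions fields above combine in the obvious way, for example a follow-style request for the recent logs of one container. The container name and line count below are invented for the sketch; as the field comments state, at most one of SinceSeconds and SinceTime may be set.

```go
package main

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	tail := int64(100)

	opts := v1.PodLogOptions{
		Container:  "app", // defaults to the only container if omitted
		Follow:     true,  // stream instead of returning once
		TailLines:  &tail, // only the last 100 lines
		Timestamps: true,  // prefix each line with an RFC3339 timestamp
	}
	fmt.Printf("%+v\n", opts)
}
```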
+// --- +// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +type PodExecOptions struct { + metav1.TypeMeta `json:",inline"` + + // Redirect the standard input stream of the pod for this call. + // Defaults to false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` + + // Redirect the standard output stream of the pod for this call. + // Defaults to true. + // +optional + Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` + + // Redirect the standard error stream of the pod for this call. + // Defaults to true. + // +optional + Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` + + // TTY if true indicates that a tty will be allocated for the exec call. + // Defaults to false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` + + // Container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` + + // Command is the remote command to execute. argv array. Not executed within a shell. + Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +// when using WebSockets. +// The `port` query parameter must specify the port or +// ports (comma separated) to forward over. +// Port forwarding over SPDY does not use these options. It requires the port +// to be passed in the `port` header as part of request. +type PodPortForwardOptions struct { + metav1.TypeMeta `json:",inline"` + + // List of ports to forward + // Required when using WebSockets + // +optional + Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` +} + +// PodProxyOptions is the query options to a Pod's proxy call. +type PodProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the URL path to use for the current proxy request to pod. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// NodeProxyOptions is the query options to a Node's proxy call. +type NodeProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the URL path to use for the current proxy request to node. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + // Namespace of the referent. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // API version of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` + // Specific resourceVersion to which this reference is made, if any. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // If referring to a piece of an object instead of an entire object, this string + // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + // For example, if the object reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"` +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +type LocalObjectReference struct { + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // TODO: Add other useful fields. apiVersion, kind, uid? + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` +} + +// SerializedReference is a reference to serialized object. +type SerializedReference struct { + metav1.TypeMeta `json:",inline"` + // The reference to an object in the system. + // +optional + Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` +} + +// EventSource contains information for an event. +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"` + // Node name on which the event is generated. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient=true + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
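The fieldPath convention described for ObjectReference ("spec.containers{name}") is what an Event's involvedObject typically carries when the event concerns a single container. The pod and container names below are invented for the sketch; the import path is the vendored k8s.io/client-go/pkg/api/v1.

```go
package main

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	// A reference to the "nginx" container inside one pod, using the
	// "spec.containers{name}" field-path form from the comment above.
	ref := v1.ObjectReference{
		Kind:       "Pod",
		APIVersion: "v1",
		Namespace:  "default",
		Name:       "nginx-1234",
		FieldPath:  "spec.containers{nginx}",
	}
	fmt.Printf("%s %s/%s %s\n", ref.Kind, ref.Namespace, ref.Name, ref.FieldPath)
}
```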
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // The object that this event is about. + InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"` + + // This should be a short, machine understandable string that gives the reason + // for the transition into the object's current status. + // TODO: provide exact specification for format. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + + // A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + + // The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"` + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"` + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"` + + // The number of times this event has occurred. + // +optional + Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"` + + // Type of this event (Normal, Warning), new types could be added in the future + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` +} + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of events + Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// List holds a list of objects, which may not be known by the server. +type List struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of objects + Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// LimitType is a type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. +type LimitRangeItem struct { + // Type of resource that this limit applies to. + // +optional + Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` + // Max usage constraints on this kind by resource name. + // +optional + Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"` + // Min usage constraints on this kind by resource name. 
+ // +optional + Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"` + // Default resource requirement limit value by resource name if resource limit is omitted. + // +optional + Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"` + // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. + // +optional + DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"` + // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. + // +optional + MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"` +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind. +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced. + Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"` +} + +// +genclient=true + +// LimitRange sets resource usage limits for each kind of resource in a Namespace. +type LimitRange struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the limits enforced. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LimitRangeList is a list of LimitRange items. +type LimitRangeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of LimitRange objects. + // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md + Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. 
(500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. +type ResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource. + // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + // +optional + Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. + // +optional + Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"` +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use. +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource. + // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + // +optional + Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` + // Used is the current observed total usage of the resource in the namespace. + // +optional + Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"` +} + +// +genclient=true + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired quota. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status defines the actual enforced quota and its current usage. + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceQuotaList is a list of ResourceQuota items. +type ResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
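A quota spec pulls together the resource-name constants above with the scope filters. A sketch with arbitrary limits, assuming the vendored apimachinery and client-go packages from this change.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	// Cap a namespace at 10 pods and 2 cores / 4Gi of requests, counting only
	// pods that are not best-effort.
	spec := v1.ResourceQuotaSpec{
		Hard: v1.ResourceList{
			v1.ResourcePods:           resource.MustParse("10"),
			v1.ResourceRequestsCPU:    resource.MustParse("2"),
			v1.ResourceRequestsMemory: resource.MustParse("4Gi"),
		},
		Scopes: []v1.ResourceQuotaScope{v1.ResourceQuotaScopeNotBestEffort},
	}

	for name, qty := range spec.Hard {
		fmt.Printf("%s = %s\n", name, qty.String())
	}
}
```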
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ResourceQuota objects. + // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 + // +optional + Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` + + // stringData allows specifying non-binary secret data in string form. + // It is provided as a write-only convenience method. + // All keys and values are merged into the data field on write, overwriting any existing values. + // It is never output when reading from the API. + // +k8s:conversion-gen=false + // +optional + StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"` + + // Used to facilitate programmatic handling of secret data. + // +optional + Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"` +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default. 
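The Data / StringData split described above is mostly visible at serialization time: Data values travel base64-encoded, while StringData is a write-only convenience that the API server folds into Data. A sketch with invented names and values, assuming the vendored metav1 and client-go packages.

```go
package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	sec := v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "db-creds"},
		Type:       v1.SecretTypeOpaque,
		Data:       map[string][]byte{"password": []byte("s3cr3t")},
		StringData: map[string]string{"username": "admin"},
	}

	out, err := json.Marshal(sec)
	if err != nil {
		panic(err)
	}
	// The []byte value is emitted base64-encoded ("s3cr3t" -> "czNjcjN0"),
	// while stringData stays human readable until the server merges it.
	fmt.Println(string(out))
}
```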
Arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authetication. + // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. 
+ // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secert. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" +) + +// SecretList is a list of Secret. +type SecretList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of secret objects. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// ConfigMap holds configuration data for pods to consume. +type ConfigMap struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data contains the configuration data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // +optional + Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` +} + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ConfigMaps. + Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +// Information about the condition of a component. +type ComponentCondition struct { + // Type of condition for a component. + // Valid value: "Healthy" + Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"` + // Status of the condition for a component. + // Valid values for "Healthy": "True", "False", or "Unknown". + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Message about the condition for a component. + // For example, information about a health check. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // Condition error code for a component. + // For example, a health check error code. + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` +} + +// +genclient=true +// +nonNamespaced=true + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of component conditions observed + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` +} + +// Status of all the conditions for the component as a list of ComponentStatus objects. +type ComponentStatusList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ComponentStatus objects. + Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// DownwardAPIVolumeSource represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +type DownwardAPIVolumeSource struct { + // Items is a list of downward API volume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` +} + +const ( + DownwardAPIVolumeSourceDefaultMode int32 = 0644 +) + +// DownwardAPIVolumeFile represents information to create the file containing the pod field +type DownwardAPIVolumeFile struct { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"` + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"` +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` +} + +// SecurityContext holds security configuration that will be applied to a container. 
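Returning to the downward API types defined just above: a volume source that projects the pod name and a container's CPU limit into files, using the documented 0644 default mode. ObjectFieldSelector and ResourceFieldSelector are declared elsewhere in this package; the container name and file paths are invented for the sketch.

```go
package main

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	mode := int32(0644) // the documented default for created files

	src := v1.DownwardAPIVolumeSource{
		DefaultMode: &mode,
		Items: []v1.DownwardAPIVolumeFile{
			{
				Path:     "podname",
				FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
			},
			{
				Path: "cpu_limit",
				ResourceFieldRef: &v1.ResourceFieldSelector{
					ContainerName: "app",
					Resource:      "limits.cpu",
				},
			},
		},
	}
	fmt.Printf("%d files, default mode %o\n", len(src.Items), *src.DefaultMode)
}
```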
+// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"` + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"` + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *types.UnixUserID `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser,casttype=k8s.io/apimachinery/pkg/types.UnixUserID"` + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"` + // Whether this container has a read-only root filesystem. + // Default is false. + // +optional + ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"` +} + +// SELinuxOptions are the labels to be applied to the container +type SELinuxOptions struct { + // User is a SELinux user label that applies to the container. + // +optional + User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"` + // Role is a SELinux role label that applies to the container. + // +optional + Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"` + // Type is a SELinux type label that applies to the container. + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"` + // Level is SELinux level label that applies to the container. + // +optional + Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` +} + +// RangeAllocation is not a public type. +type RangeAllocation struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Range is string that identifies the range represented by 'data'. 
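The SecurityContext / PodSecurityContext precedence described above is usually exercised by setting only the per-container overrides that matter; the fields are pointers so that anything left unset falls back to the pod-level or runtime defaults. A sketch only, using the Capabilities type declared elsewhere in this package.

```go
package main

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	privileged := false
	nonRoot := true
	readOnlyRoot := true

	// Per-container hardening. Fields that also exist on PodSecurityContext
	// (for example RunAsNonRoot) take precedence here when both are set.
	sc := v1.SecurityContext{
		Privileged:             &privileged,
		RunAsNonRoot:           &nonRoot,
		ReadOnlyRootFilesystem: &readOnlyRoot,
		Capabilities:           &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
	}
	fmt.Printf("%+v\n", sc)
}
```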
+ Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
+ // Data is a bit array containing all allocated addresses in the previous segment.
+ Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
+}
+
+const (
+ // "default-scheduler" is the name of default scheduler.
+ DefaultSchedulerName = "default-scheduler"
+
+ // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
+ // corresponding to every RequiredDuringScheduling affinity rule.
+ // When the --hard-pod-affinity-weight scheduler flag is not specified,
+ // DefaultHardPodAffinitySymmetricWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
+ DefaultHardPodAffinitySymmetricWeight int = 1
+)
+
+// Sysctl defines a kernel parameter to be set
+type Sysctl struct {
+ // Name of a property to set
+ Name string `protobuf:"bytes,1,opt,name=name"`
+ // Value of a property to set
+ Value string `protobuf:"bytes,2,opt,name=value"`
+}
+
+// NodeResources is an object for conveying resource information about a node.
+// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
+type NodeResources struct {
+ // Capacity represents the available resources of a node
+ Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+}
+
+const (
+ // Enable stdin for remote command execution
+ ExecStdinParam = "input"
+ // Enable stdout for remote command execution
+ ExecStdoutParam = "output"
+ // Enable stderr for remote command execution
+ ExecStderrParam = "error"
+ // Enable TTY for remote command execution
+ ExecTTYParam = "tty"
+ // Command to run for remote command execution
+ ExecCommandParamm = "command"
+
+ // Name of header that specifies stream type
+ StreamType = "streamType"
+ // Value for streamType header for stdin stream
+ StreamTypeStdin = "stdin"
+ // Value for streamType header for stdout stream
+ StreamTypeStdout = "stdout"
+ // Value for streamType header for stderr stream
+ StreamTypeStderr = "stderr"
+ // Value for streamType header for data stream
+ StreamTypeData = "data"
+ // Value for streamType header for error stream
+ StreamTypeError = "error"
+ // Value for streamType header for terminal resize stream
+ StreamTypeResize = "resize"
+
+ // Name of header that specifies the port being forwarded
+ PortHeader = "port"
+ // Name of header that specifies a request ID used to associate the error
+ // and data streams for a single forwarded connection
+ PortForwardRequestIDHeader = "requestID"
+)
diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go
new file mode 100644
index 000000000..73bcc6197
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go
@@ -0,0 +1,2039 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AWSElasticBlockStoreVolumeSource = map[string]string{ + "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", + "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", +} + +func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { + return map_AWSElasticBlockStoreVolumeSource +} + +var map_Affinity = map[string]string{ + "": "Affinity is a group of affinity scheduling rules.", + "nodeAffinity": "Describes node affinity scheduling rules for the pod.", + "podAffinity": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", + "podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", +} + +func (Affinity) SwaggerDoc() map[string]string { + return map_Affinity +} + +var map_AttachedVolume = map[string]string{ + "": "AttachedVolume describes a volume attached to a node", + "name": "Name of the attached volume", + "devicePath": "DevicePath represents the device path where the volume should be available", +} + +func (AttachedVolume) SwaggerDoc() map[string]string { + return map_AttachedVolume +} + +var map_AvoidPods = map[string]string{ + "": "AvoidPods describes pods that should avoid this node. This is the value for a Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and will eventually become a field of NodeStatus.", + "preferAvoidPods": "Bounded-sized list of signatures of pods that should avoid this node, sorted in timestamp order from oldest to newest. 
Size of the slice is unspecified.",
+}
+
+func (AvoidPods) SwaggerDoc() map[string]string {
+ return map_AvoidPods
+}
+
+var map_AzureDiskVolumeSource = map[string]string{
+ "": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+ "diskName": "The Name of the data disk in the blob storage",
+ "diskURI": "The URI of the data disk in the blob storage",
+ "cachingMode": "Host Caching mode: None, Read Only, Read Write.",
+ "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+ "kind": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared",
+}
+
+func (AzureDiskVolumeSource) SwaggerDoc() map[string]string {
+ return map_AzureDiskVolumeSource
+}
+
+var map_AzureFileVolumeSource = map[string]string{
+ "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ "secretName": "the name of secret that contains Azure Storage Account Name and Key",
+ "shareName": "Share Name",
+ "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (AzureFileVolumeSource) SwaggerDoc() map[string]string {
+ return map_AzureFileVolumeSource
+}
+
+var map_Binding = map[string]string{
+ "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "target": "The target object that you want to bind to the standard object.",
+}
+
+func (Binding) SwaggerDoc() map[string]string {
+ return map_Binding
+}
+
+var map_Capabilities = map[string]string{
+ "": "Adds and removes POSIX capabilities from running containers.",
+ "add": "Added capabilities",
+ "drop": "Removed capabilities",
+}
+
+func (Capabilities) SwaggerDoc() map[string]string {
+ return map_Capabilities
+}
+
+var map_CephFSVolumeSource = map[string]string{
+ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod. Cephfs volumes do not support ownership management or SELinux relabeling.",
+ "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+ "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /",
+ "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+ "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+ "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it",
+ "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", +} + +func (CephFSVolumeSource) SwaggerDoc() map[string]string { + return map_CephFSVolumeSource +} + +var map_CinderVolumeSource = map[string]string{ + "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", +} + +func (CinderVolumeSource) SwaggerDoc() map[string]string { + return map_CinderVolumeSource +} + +var map_ComponentCondition = map[string]string{ + "": "Information about the condition of a component.", + "type": "Type of condition for a component. Valid value: \"Healthy\"", + "status": "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".", + "message": "Message about the condition for a component. For example, information about a health check.", + "error": "Condition error code for a component. For example, a health check error code.", +} + +func (ComponentCondition) SwaggerDoc() map[string]string { + return map_ComponentCondition +} + +var map_ComponentStatus = map[string]string{ + "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "conditions": "List of component conditions observed", +} + +func (ComponentStatus) SwaggerDoc() map[string]string { + return map_ComponentStatus +} + +var map_ComponentStatusList = map[string]string{ + "": "Status of all the conditions for the component as a list of ComponentStatus objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ComponentStatus objects.", +} + +func (ComponentStatusList) SwaggerDoc() map[string]string { + return map_ComponentStatusList +} + +var map_ConfigMap = map[string]string{ + "": "ConfigMap holds configuration data for pods to consume.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "data": "Data contains the configuration data. 
Each key must consist of alphanumeric characters, '-', '_' or '.'.",
+}
+
+func (ConfigMap) SwaggerDoc() map[string]string {
+ return map_ConfigMap
+}
+
+var map_ConfigMapEnvSource = map[string]string{
+ "": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.",
+ "optional": "Specify whether the ConfigMap must be defined",
+}
+
+func (ConfigMapEnvSource) SwaggerDoc() map[string]string {
+ return map_ConfigMapEnvSource
+}
+
+var map_ConfigMapKeySelector = map[string]string{
+ "": "Selects a key from a ConfigMap.",
+ "key": "The key to select.",
+ "optional": "Specify whether the ConfigMap or its key must be defined",
+}
+
+func (ConfigMapKeySelector) SwaggerDoc() map[string]string {
+ return map_ConfigMapKeySelector
+}
+
+var map_ConfigMapList = map[string]string{
+ "": "ConfigMapList is a resource containing a list of ConfigMap objects.",
+ "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "items": "Items is the list of ConfigMaps.",
+}
+
+func (ConfigMapList) SwaggerDoc() map[string]string {
+ return map_ConfigMapList
+}
+
+var map_ConfigMapProjection = map[string]string{
+ "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.",
+ "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+ "optional": "Specify whether the ConfigMap or its keys must be defined",
+}
+
+func (ConfigMapProjection) SwaggerDoc() map[string]string {
+ return map_ConfigMapProjection
+}
+
+var map_ConfigMapVolumeSource = map[string]string{
+ "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.",
+ "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+ "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting.
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "Specify whether the ConfigMap or it's keys must be defined", +} + +func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { + return map_ConfigMapVolumeSource +} + +var map_Container = map[string]string{ + "": "A single application container that you want to run within a pod.", + "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "env": "List of environment variables to set in the container. Cannot be updated.", + "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "readinessProbe": "Periodic probe of container service readiness. 
Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "securityContext": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md", + "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", +} + +func (Container) SwaggerDoc() map[string]string { + return map_Container +} + +var map_ContainerImage = map[string]string{ + "": "Describe a container image", + "names": "Names by which this image is known. e.g. [\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "sizeBytes": "The size of the image in bytes.", +} + +func (ContainerImage) SwaggerDoc() map[string]string { + return map_ContainerImage +} + +var map_ContainerPort = map[string]string{ + "": "ContainerPort represents a network port in a single container.", + "name": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", + "hostPort": "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. 
If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.", + "containerPort": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.", + "protocol": "Protocol for port. Must be UDP or TCP. Defaults to \"TCP\".", + "hostIP": "What host IP to bind the external port to.", +} + +func (ContainerPort) SwaggerDoc() map[string]string { + return map_ContainerPort +} + +var map_ContainerState = map[string]string{ + "": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", + "waiting": "Details about a waiting container", + "running": "Details about a running container", + "terminated": "Details about a terminated container", +} + +func (ContainerState) SwaggerDoc() map[string]string { + return map_ContainerState +} + +var map_ContainerStateRunning = map[string]string{ + "": "ContainerStateRunning is a running state of a container.", + "startedAt": "Time at which the container was last (re-)started", +} + +func (ContainerStateRunning) SwaggerDoc() map[string]string { + return map_ContainerStateRunning +} + +var map_ContainerStateTerminated = map[string]string{ + "": "ContainerStateTerminated is a terminated state of a container.", + "exitCode": "Exit status from the last termination of the container", + "signal": "Signal from the last termination of the container", + "reason": "(brief) reason from the last termination of the container", + "message": "Message regarding the last termination of the container", + "startedAt": "Time at which previous execution of the container started", + "finishedAt": "Time at which the container last terminated", + "containerID": "Container's ID in the format 'docker://'", +} + +func (ContainerStateTerminated) SwaggerDoc() map[string]string { + return map_ContainerStateTerminated +} + +var map_ContainerStateWaiting = map[string]string{ + "": "ContainerStateWaiting is a waiting state of a container.", + "reason": "(brief) reason the container is not yet running.", + "message": "Message regarding why the container is not yet running.", +} + +func (ContainerStateWaiting) SwaggerDoc() map[string]string { + return map_ContainerStateWaiting +} + +var map_ContainerStatus = map[string]string{ + "": "ContainerStatus contains details for the current status of this container.", + "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.", + "state": "Details about the container's current condition.", + "lastState": "Details about the container's last termination condition.", + "ready": "Specifies whether the container has passed its readiness probe.", + "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", + "image": "The image the container is running. 
More info: https://kubernetes.io/docs/concepts/containers/images", + "imageID": "ImageID of the container's image.", + "containerID": "Container's ID in the format 'docker://'.", +} + +func (ContainerStatus) SwaggerDoc() map[string]string { + return map_ContainerStatus +} + +var map_DaemonEndpoint = map[string]string{ + "": "DaemonEndpoint contains information about a single Daemon endpoint.", + "Port": "Port number of the given endpoint.", +} + +func (DaemonEndpoint) SwaggerDoc() map[string]string { + return map_DaemonEndpoint +} + +var map_DeleteOptions = map[string]string{ + "": "DeleteOptions may be provided when deleting an API object DEPRECATED: This type has been moved to meta/v1 and will be removed soon.", + "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", +} + +func (DeleteOptions) SwaggerDoc() map[string]string { + return map_DeleteOptions +} + +var map_DownwardAPIProjection = map[string]string{ + "": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", + "items": "Items is a list of DownwardAPIVolume file", +} + +func (DownwardAPIProjection) SwaggerDoc() map[string]string { + return map_DownwardAPIProjection +} + +var map_DownwardAPIVolumeFile = map[string]string{ + "": "DownwardAPIVolumeFile represents information to create the file containing the pod field", + "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", + "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + "mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string { + return map_DownwardAPIVolumeFile +} + +var map_DownwardAPIVolumeSource = map[string]string{ + "": "DownwardAPIVolumeSource represents a volume containing downward API info. 
Downward API volumes support ownership management and SELinux relabeling.",
+ "items": "Items is a list of downward API volume file",
+ "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+}
+
+func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string {
+ return map_DownwardAPIVolumeSource
+}
+
+var map_EmptyDirVolumeSource = map[string]string{
+ "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.",
+ "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+ "sizeLimit": "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir",
+}
+
+func (EmptyDirVolumeSource) SwaggerDoc() map[string]string {
+ return map_EmptyDirVolumeSource
+}
+
+var map_EndpointAddress = map[string]string{
+ "": "EndpointAddress is a tuple that describes a single IP address.",
+ "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast (224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.",
+ "hostname": "The Hostname of this endpoint",
+ "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.",
+ "targetRef": "Reference to object providing the endpoint.",
+}
+
+func (EndpointAddress) SwaggerDoc() map[string]string {
+ return map_EndpointAddress
+}
+
+var map_EndpointPort = map[string]string{
+ "": "EndpointPort is a tuple that describes a single port.",
+ "name": "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.",
+ "port": "The port number of the endpoint.",
+ "protocol": "The IP protocol for this port. Must be UDP or TCP. Default is TCP.",
+}
+
+func (EndpointPort) SwaggerDoc() map[string]string {
+ return map_EndpointPort
+}
+
+var map_EndpointSubset = map[string]string{
+ "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]",
+ "addresses": "IP addresses which offer the related ports that are marked as ready.
These endpoints should be considered safe for load balancers and clients to utilize.",
+ "notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.",
+ "ports": "Port numbers available on the related IP addresses.",
+}
+
+func (EndpointSubset) SwaggerDoc() map[string]string {
+ return map_EndpointSubset
+}
+
+var map_Endpoints = map[string]string{
+ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.",
+}
+
+func (Endpoints) SwaggerDoc() map[string]string {
+ return map_Endpoints
+}
+
+var map_EndpointsList = map[string]string{
+ "": "EndpointsList is a list of endpoints.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+ "items": "List of endpoints.",
+}
+
+func (EndpointsList) SwaggerDoc() map[string]string {
+ return map_EndpointsList
+}
+
+var map_EnvFromSource = map[string]string{
+ "": "EnvFromSource represents the source of a set of ConfigMaps",
+ "prefix": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.",
+ "configMapRef": "The ConfigMap to select from",
+ "secretRef": "The Secret to select from",
+}
+
+func (EnvFromSource) SwaggerDoc() map[string]string {
+ return map_EnvFromSource
+}
+
+var map_EnvVar = map[string]string{
+ "": "EnvVar represents an environment variable present in a Container.",
+ "name": "Name of the environment variable. Must be a C_IDENTIFIER.",
+ "value": "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
+ "valueFrom": "Source for the environment variable's value.
Cannot be used if value is not empty.", +} + +func (EnvVar) SwaggerDoc() map[string]string { + return map_EnvVar +} + +var map_EnvVarSource = map[string]string{ + "": "EnvVarSource represents a source for the value of an EnvVar.", + "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + "configMapKeyRef": "Selects a key of a ConfigMap.", + "secretKeyRef": "Selects a key of a secret in the pod's namespace", +} + +func (EnvVarSource) SwaggerDoc() map[string]string { + return map_EnvVarSource +} + +var map_Event = map[string]string{ + "": "Event is a report of an event somewhere in the cluster.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "involvedObject": "The object that this event is about.", + "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + "message": "A human-readable description of the status of this operation.", + "source": "The component reporting this event. Should be a short machine understandable string.", + "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", + "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", + "count": "The number of times this event has occurred.", + "type": "Type of this event (Normal, Warning), new types could be added in the future", +} + +func (Event) SwaggerDoc() map[string]string { + return map_Event +} + +var map_EventList = map[string]string{ + "": "EventList is a list of events.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of events", +} + +func (EventList) SwaggerDoc() map[string]string { + return map_EventList +} + +var map_EventSource = map[string]string{ + "": "EventSource contains information for an event.", + "component": "Component from which the event is generated.", + "host": "Node name on which the event is generated.", +} + +func (EventSource) SwaggerDoc() map[string]string { + return map_EventSource +} + +var map_ExecAction = map[string]string{ + "": "ExecAction describes a \"run in container\" action.", + "command": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", +} + +func (ExecAction) SwaggerDoc() map[string]string { + return map_ExecAction +} + +var map_FCVolumeSource = map[string]string{ + "": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", + "targetWWNs": "Required: FC target worldwide names (WWNs)", + "lun": "Required: FC target lun number", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
\"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (FCVolumeSource) SwaggerDoc() map[string]string { + return map_FCVolumeSource +} + +var map_FlexVolumeSource = map[string]string{ + "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "driver": "Driver is the name of the driver to use for this volume.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "options": "Optional: Extra command options if any.", +} + +func (FlexVolumeSource) SwaggerDoc() map[string]string { + return map_FlexVolumeSource +} + +var map_FlockerVolumeSource = map[string]string{ + "": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", + "datasetName": "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated", + "datasetUUID": "UUID of the dataset. This is unique identifier of a Flocker dataset", +} + +func (FlockerVolumeSource) SwaggerDoc() map[string]string { + return map_FlockerVolumeSource +} + +var map_GCEPersistentDiskVolumeSource = map[string]string{ + "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", + "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", +} + +func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { + return map_GCEPersistentDiskVolumeSource +} + +var map_GitRepoVolumeSource = map[string]string{ + "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "repository": "Repository URL", + "revision": "Commit hash for the specified revision.", + "directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", +} + +func (GitRepoVolumeSource) SwaggerDoc() map[string]string { + return map_GitRepoVolumeSource +} + +var map_GlusterfsVolumeSource = map[string]string{ + "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", +} + +func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { + return map_GlusterfsVolumeSource +} + +var map_HTTPGetAction = map[string]string{ + "": "HTTPGetAction describes an action based on HTTP Get requests.", + "path": "Path to access on the HTTP server.", + "port": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", + "host": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", + "scheme": "Scheme to use for connecting to the host. Defaults to HTTP.", + "httpHeaders": "Custom headers to set in the request. HTTP allows repeated headers.", +} + +func (HTTPGetAction) SwaggerDoc() map[string]string { + return map_HTTPGetAction +} + +var map_HTTPHeader = map[string]string{ + "": "HTTPHeader describes a custom header to be used in HTTP probes", + "name": "The header field name", + "value": "The header field value", +} + +func (HTTPHeader) SwaggerDoc() map[string]string { + return map_HTTPHeader +} + +var map_Handler = map[string]string{ + "": "Handler defines a specific action that should be taken", + "exec": "One and only one of the following should be specified. Exec specifies the action to take.", + "httpGet": "HTTPGet specifies the http request to perform.", + "tcpSocket": "TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported", +} + +func (Handler) SwaggerDoc() map[string]string { + return map_Handler +} + +var map_HostAlias = map[string]string{ + "": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.", + "ip": "IP address of the host file entry.", + "hostnames": "Hostnames for the above IP address.", +} + +func (HostAlias) SwaggerDoc() map[string]string { + return map_HostAlias +} + +var map_HostPathVolumeSource = map[string]string{ + "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", + "path": "Path of the directory on the host. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", +} + +func (HostPathVolumeSource) SwaggerDoc() map[string]string { + return map_HostPathVolumeSource +} + +var map_ISCSIVolumeSource = map[string]string{ + "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "Target iSCSI Qualified Name.", + "lun": "iSCSI target lun number.", + "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", + "chapAuthSession": "whether support iSCSI Session CHAP authentication", + "secretRef": "CHAP secret for iSCSI target and initiator authentication", +} + +func (ISCSIVolumeSource) SwaggerDoc() map[string]string { + return map_ISCSIVolumeSource +} + +var map_KeyToPath = map[string]string{ + "": "Maps a string key to a path within a volume.", + "key": "The key to project.", + "path": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", + "mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (KeyToPath) SwaggerDoc() map[string]string { + return map_KeyToPath +} + +var map_Lifecycle = map[string]string{ + "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", + "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", +} + +func (Lifecycle) SwaggerDoc() map[string]string { + return map_Lifecycle +} + +var map_LimitRange = map[string]string{ + "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (LimitRange) SwaggerDoc() map[string]string { + return map_LimitRange +} + +var map_LimitRangeItem = map[string]string{ + "": "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.", + "type": "Type of resource that this limit applies to.", + "max": "Max usage constraints on this kind by resource name.", + "min": "Min usage constraints on this kind by resource name.", + "default": "Default resource requirement limit value by resource name if resource limit is omitted.", + "defaultRequest": "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.", + "maxLimitRequestRatio": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", +} + +func (LimitRangeItem) SwaggerDoc() map[string]string { + return map_LimitRangeItem +} + +var map_LimitRangeList = map[string]string{ + "": "LimitRangeList is a list of LimitRange items.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is a list of LimitRange objects. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md", +} + +func (LimitRangeList) SwaggerDoc() map[string]string { + return map_LimitRangeList +} + +var map_LimitRangeSpec = map[string]string{ + "": "LimitRangeSpec defines a min/max usage limit for resources that match on kind.", + "limits": "Limits is the list of LimitRangeItem objects that are enforced.", +} + +func (LimitRangeSpec) SwaggerDoc() map[string]string { + return map_LimitRangeSpec +} + +var map_List = map[string]string{ + "": "List holds a list of objects, which may not be known by the server.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of objects", +} + +func (List) SwaggerDoc() map[string]string { + return map_List +} + +var map_ListOptions = map[string]string{ + "": "ListOptions is the query options to a standard REST list call. DEPRECATED: This type has been moved to meta/v1 and will be removed soon.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. 
Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "includeUninitialized": "If true, partially initialized resources are included in the response.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call.", +} + +func (ListOptions) SwaggerDoc() map[string]string { + return map_ListOptions +} + +var map_LoadBalancerIngress = map[string]string{ + "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", + "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", + "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", +} + +func (LoadBalancerIngress) SwaggerDoc() map[string]string { + return map_LoadBalancerIngress +} + +var map_LoadBalancerStatus = map[string]string{ + "": "LoadBalancerStatus represents the status of a load-balancer.", + "ingress": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.", +} + +func (LoadBalancerStatus) SwaggerDoc() map[string]string { + return map_LoadBalancerStatus +} + +var map_LocalObjectReference = map[string]string{ + "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", +} + +func (LocalObjectReference) SwaggerDoc() map[string]string { + return map_LocalObjectReference +} + +var map_LocalVolumeSource = map[string]string{ + "": "Local represents directly-attached storage with node affinity", + "path": "The full path to the volume on the node For alpha, this path must be a directory Once block as a source is supported, then this path can point to a block device", +} + +func (LocalVolumeSource) SwaggerDoc() map[string]string { + return map_LocalVolumeSource +} + +var map_NFSVolumeSource = map[string]string{ + "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", + "server": "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "path": "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", +} + +func (NFSVolumeSource) SwaggerDoc() map[string]string { + return map_NFSVolumeSource +} + +var map_Namespace = map[string]string{ + "": "Namespace provides a scope for Names. 
Use of multiple namespaces is optional.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (Namespace) SwaggerDoc() map[string]string { + return map_Namespace +} + +var map_NamespaceList = map[string]string{ + "": "NamespaceList is a list of Namespaces.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", +} + +func (NamespaceList) SwaggerDoc() map[string]string { + return map_NamespaceList +} + +var map_NamespaceSpec = map[string]string{ + "": "NamespaceSpec describes the attributes on a Namespace.", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers", +} + +func (NamespaceSpec) SwaggerDoc() map[string]string { + return map_NamespaceSpec +} + +var map_NamespaceStatus = map[string]string{ + "": "NamespaceStatus is information about the current status of a Namespace.", + "phase": "Phase is the current lifecycle phase of the namespace. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases", +} + +func (NamespaceStatus) SwaggerDoc() map[string]string { + return map_NamespaceStatus +} + +var map_Node = map[string]string{ + "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (Node) SwaggerDoc() map[string]string { + return map_Node +} + +var map_NodeAddress = map[string]string{ + "": "NodeAddress contains information for the node's address.", + "type": "Node address type, one of Hostname, ExternalIP or InternalIP.", + "address": "The node address.", +} + +func (NodeAddress) SwaggerDoc() map[string]string { + return map_NodeAddress +} + +var map_NodeAffinity = map[string]string{ + "": "Node affinity is a group of node affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to an update), the system may or may not try to eventually evict the pod from its node.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", +} + +func (NodeAffinity) SwaggerDoc() map[string]string { + return map_NodeAffinity +} + +var map_NodeCondition = map[string]string{ + "": "NodeCondition contains condition information for a node.", + "type": "Type of node condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastHeartbeatTime": "Last time we got an update on a given condition.", + "lastTransitionTime": "Last time the condition transit from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (NodeCondition) SwaggerDoc() map[string]string { + return map_NodeCondition +} + +var map_NodeDaemonEndpoints = map[string]string{ + "": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", + "kubeletEndpoint": "Endpoint on which Kubelet is listening.", +} + +func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { + return map_NodeDaemonEndpoints +} + +var map_NodeList = map[string]string{ + "": "NodeList is the whole list of all Nodes which have been registered with master.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of nodes", +} + +func (NodeList) SwaggerDoc() map[string]string { + return map_NodeList +} + +var map_NodeProxyOptions = map[string]string{ + "": "NodeProxyOptions is the query options to a Node's proxy call.", + "path": "Path is the URL path to use for the current proxy request to node.", +} + +func (NodeProxyOptions) SwaggerDoc() map[string]string { + return map_NodeProxyOptions +} + +var map_NodeResources = map[string]string{ + "": "NodeResources is an object for conveying resource information about a node. see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.", + "Capacity": "Capacity represents the available resources of a node", +} + +func (NodeResources) SwaggerDoc() map[string]string { + return map_NodeResources +} + +var map_NodeSelector = map[string]string{ + "": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", + "nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.", +} + +func (NodeSelector) SwaggerDoc() map[string]string { + return map_NodeSelector +} + +var map_NodeSelectorRequirement = map[string]string{ + "": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "The label key that the selector applies to.", + "operator": "Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", + "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", +} + +func (NodeSelectorRequirement) SwaggerDoc() map[string]string { + return map_NodeSelectorRequirement +} + +var map_NodeSelectorTerm = map[string]string{ + "": "A null or empty node selector term matches no objects.", + "matchExpressions": "Required. A list of node selector requirements. The requirements are ANDed.", +} + +func (NodeSelectorTerm) SwaggerDoc() map[string]string { + return map_NodeSelectorTerm +} + +var map_NodeSpec = map[string]string{ + "": "NodeSpec describes the attributes that a node is created with.", + "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", + "externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.", + "providerID": "ID of the node assigned by the cloud provider in the format: ://", + "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", + "taints": "If specified, the node's taints.", +} + +func (NodeSpec) SwaggerDoc() map[string]string { + return map_NodeSpec +} + +var map_NodeStatus = map[string]string{ + "": "NodeStatus is information about the current status of a node.", + "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", + "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", + "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses", + "daemonEndpoints": "Endpoints of daemons running on the Node.", + "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", + "images": "List of container images on this node", + "volumesInUse": "List of attachable volumes in use (mounted) by the node.", + "volumesAttached": "List of volumes that are attached to the node.", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_NodeSystemInfo = map[string]string{ + "": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", + "machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", + "systemUUID": "SystemUUID reported by the node. For unique machine identification MachineID is preferred. 
This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html", + "bootID": "Boot ID reported by the node.", + "kernelVersion": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", + "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", + "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).", + "kubeletVersion": "Kubelet Version reported by the node.", + "kubeProxyVersion": "KubeProxy Version reported by the node.", + "operatingSystem": "The Operating System reported by the node", + "architecture": "The Architecture reported by the node", +} + +func (NodeSystemInfo) SwaggerDoc() map[string]string { + return map_NodeSystemInfo +} + +var map_ObjectFieldSelector = map[string]string{ + "": "ObjectFieldSelector selects an APIVersioned field of an object.", + "apiVersion": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", + "fieldPath": "Path of the field to select in the specified API version.", +} + +func (ObjectFieldSelector) SwaggerDoc() map[string]string { + return map_ObjectFieldSelector +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "uid": "UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_ObjectReference = map[string]string{ + "": "ObjectReference contains enough information to let you inspect or modify the referred object.", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "namespace": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", + "apiVersion": "API version of the referent.", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", +} + +func (ObjectReference) SwaggerDoc() map[string]string { + return map_ObjectReference +} + +var map_PersistentVolume = map[string]string{ + "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", + "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", +} + +func (PersistentVolume) SwaggerDoc() map[string]string { + return map_PersistentVolume +} + +var map_PersistentVolumeClaim = map[string]string{ + "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", +} + +func (PersistentVolumeClaim) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaim +} + +var map_PersistentVolumeClaimList = map[string]string{ + "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", +} + +func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimList +} + +var map_PersistentVolumeClaimSpec = map[string]string{ + "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "accessModes": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "selector": "A label query over volumes to consider for binding.", + "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", + "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", +} + +func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimSpec +} + +var map_PersistentVolumeClaimStatus = map[string]string{ + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "Phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "capacity": "Represents the actual resources of the underlying volume.", +} + +func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimStatus +} + +var map_PersistentVolumeClaimVolumeSource = map[string]string{ + "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", + "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "readOnly": "Will force the ReadOnly setting in VolumeMounts. Default false.", +} + +func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimVolumeSource +} + +var map_PersistentVolumeList = map[string]string{ + "": "PersistentVolumeList is a list of PersistentVolume items.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", +} + +func (PersistentVolumeList) SwaggerDoc() map[string]string { + return map_PersistentVolumeList +} + +var map_PersistentVolumeSource = map[string]string{ + "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
Provisioned by an admin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "local": "Local represents directly-attached storage with node affinity", + "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", +} + +func (PersistentVolumeSource) SwaggerDoc() map[string]string { + return map_PersistentVolumeSource +} + +var map_PersistentVolumeSpec = map[string]string{ + "": "PersistentVolumeSpec is the specification of a persistent volume.", + "capacity": "A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "accessModes": "AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes", + "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding", + "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", + "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", +} + +func (PersistentVolumeSpec) SwaggerDoc() map[string]string { + return map_PersistentVolumeSpec +} + +var map_PersistentVolumeStatus = map[string]string{ + "": "PersistentVolumeStatus is the current status of a persistent volume.", + "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", + "message": "A human-readable message indicating details about why the volume is in this state.", + "reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", +} + +func (PersistentVolumeStatus) SwaggerDoc() map[string]string { + return map_PersistentVolumeStatus +} + +var map_PhotonPersistentDiskVolumeSource = map[string]string{ + "": "Represents a Photon Controller persistent disk resource.", + "pdID": "ID that identifies Photon Controller persistent disk", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", +} + +func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { + return map_PhotonPersistentDiskVolumeSource +} + +var map_Pod = map[string]string{ + "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (Pod) SwaggerDoc() map[string]string { + return map_Pod +} + +var map_PodAffinity = map[string]string{ + "": "Pod affinity is a group of inter pod affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAffinity) SwaggerDoc() map[string]string { + return map_PodAffinity +} + +var map_PodAffinityTerm = map[string]string{ + "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key tches that of any node on which a pod of the set of pods is running", + "labelSelector": "A label query over a set of resources, in this case pods.", + "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", + "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", +} + +func (PodAffinityTerm) SwaggerDoc() map[string]string { + return map_PodAffinityTerm +} + +var map_PodAntiAffinity = map[string]string{ + "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAntiAffinity) SwaggerDoc() map[string]string { + return map_PodAntiAffinity +} + +var map_PodAttachOptions = map[string]string{ + "": "PodAttachOptions is the query options to a Pod's remote attach call.", + "stdin": "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.", + "stdout": "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.", + "stderr": "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.", + "tty": "TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.", + "container": "The container in which to execute the command. Defaults to only container if there is only one container in the pod.", +} + +func (PodAttachOptions) SwaggerDoc() map[string]string { + return map_PodAttachOptions +} + +var map_PodCondition = map[string]string{ + "": "PodCondition contains details for the current condition of this pod.", + "type": "Type is the type of the condition. Currently only Ready. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "lastProbeTime": "Last time we probed the condition.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "Unique, one-word, CamelCase reason for the condition's last transition.", + "message": "Human-readable message indicating details about last transition.", +} + +func (PodCondition) SwaggerDoc() map[string]string { + return map_PodCondition +} + +var map_PodExecOptions = map[string]string{ + "": "PodExecOptions is the query options to a Pod's remote exec call.", + "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", + "stdout": "Redirect the standard output stream of the pod for this call. Defaults to true.", + "stderr": "Redirect the standard error stream of the pod for this call. Defaults to true.", + "tty": "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.", + "container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.", + "command": "Command is the remote command to execute. argv array. Not executed within a shell.", +} + +func (PodExecOptions) SwaggerDoc() map[string]string { + return map_PodExecOptions +} + +var map_PodList = map[string]string{ + "": "PodList is a list of Pods.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of pods. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md", +} + +func (PodList) SwaggerDoc() map[string]string { + return map_PodList +} + +var map_PodLogOptions = map[string]string{ + "": "PodLogOptions is the query options for a Pod's logs REST call.", + "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "Follow the log stream of the pod. Defaults to false.", + "previous": "Return previous terminated container logs. Defaults to false.", + "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", +} + +func (PodLogOptions) SwaggerDoc() map[string]string { + return map_PodLogOptions +} + +var map_PodPortForwardOptions = map[string]string{ + "": "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. It requires the port to be passed in the `port` header as part of request.", + "ports": "List of ports to forward Required when using WebSockets", +} + +func (PodPortForwardOptions) SwaggerDoc() map[string]string { + return map_PodPortForwardOptions +} + +var map_PodProxyOptions = map[string]string{ + "": "PodProxyOptions is the query options to a Pod's proxy call.", + "path": "Path is the URL path to use for the current proxy request to pod.", +} + +func (PodProxyOptions) SwaggerDoc() map[string]string { + return map_PodProxyOptions +} + +var map_PodSecurityContext = map[string]string{ + "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", + "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", +} + +func (PodSecurityContext) SwaggerDoc() map[string]string { + return map_PodSecurityContext +} + +var map_PodSignature = map[string]string{ + "": "Describes the class of pods that should avoid this node. Exactly one field should be set.", + "podController": "Reference to controller whose pods should avoid this node.", +} + +func (PodSignature) SwaggerDoc() map[string]string { + return map_PodSignature +} + +var map_PodSpec = map[string]string{ + "": "PodSpec is a description of a pod.", + "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", + "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. 
Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", + "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", + "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", + "hostPID": "Use the host's pid namespace. Optional: Default to false.", + "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", + "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", + "subdomain": "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.", + "affinity": "If specified, the pod's scheduling constraints", + "schedulerName": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + "tolerations": "If specified, the pod's tolerations.", + "hostMappings": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", +} + +func (PodSpec) SwaggerDoc() map[string]string { + return map_PodSpec +} + +var map_PodStatus = map[string]string{ + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.", + "phase": "Current condition of the pod. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'", + "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md", +} + +func (PodStatus) SwaggerDoc() map[string]string { + return map_PodStatus +} + +var map_PodStatusResult = map[string]string{ + "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (PodStatusResult) SwaggerDoc() map[string]string { + return map_PodStatusResult +} + +var map_PodTemplate = map[string]string{ + "": "PodTemplate describes a template for creating copies of a predefined pod.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (PodTemplate) SwaggerDoc() map[string]string { + return map_PodTemplate +} + +var map_PodTemplateList = map[string]string{ + "": "PodTemplateList is a list of PodTemplates.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of pod templates", +} + +func (PodTemplateList) SwaggerDoc() map[string]string { + return map_PodTemplateList +} + +var map_PodTemplateSpec = map[string]string{ + "": "PodTemplateSpec describes the data a pod should have when created from a template", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (PodTemplateSpec) SwaggerDoc() map[string]string { + return map_PodTemplateSpec +} + +var map_PortworxVolumeSource = map[string]string{ + "": "PortworxVolumeSource represents a Portworx volume resource.", + "volumeID": "VolumeID uniquely identifies a Portworx volume", + "fsType": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (PortworxVolumeSource) SwaggerDoc() map[string]string { + return map_PortworxVolumeSource +} + +var map_Preconditions = map[string]string{ + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", +} + +func (Preconditions) SwaggerDoc() map[string]string { + return map_Preconditions +} + +var map_PreferAvoidPodsEntry = map[string]string{ + "": "Describes a class of pods that should avoid this node.", + "podSignature": "The class of pods.", + "evictionTime": "Time at which this entry was added to the list.", + "reason": "(brief) reason why this entry was added to the list.", + "message": "Human readable message indicating why this entry was added to the list.", +} + +func (PreferAvoidPodsEntry) SwaggerDoc() map[string]string { + return map_PreferAvoidPodsEntry +} + +var map_PreferredSchedulingTerm = map[string]string{ + "": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", + "weight": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", + "preference": "A node selector term, associated with the corresponding weight.", +} + +func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { + return map_PreferredSchedulingTerm +} + +var map_Probe = map[string]string{ + "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", + "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", + "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", +} + +func (Probe) SwaggerDoc() map[string]string { + return map_Probe +} + +var map_ProjectedVolumeSource = map[string]string{ + "": "Represents a projected volume source", + "sources": "list of volume projections", + "defaultMode": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (ProjectedVolumeSource) SwaggerDoc() map[string]string { + return map_ProjectedVolumeSource +} + +var map_QuobyteVolumeSource = map[string]string{ + "": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.", + "registry": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + "volume": "Volume is a string that references an already created Quobyte volume by name.", + "readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + "user": "User to map volume access to Defaults to serivceaccount user", + "group": "Group to map volume access to Default is no group", +} + +func (QuobyteVolumeSource) SwaggerDoc() map[string]string { + return map_QuobyteVolumeSource +} + +var map_RBDVolumeSource = map[string]string{ + "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", +} + +func (RBDVolumeSource) SwaggerDoc() map[string]string { + return map_RBDVolumeSource +} + +var map_RangeAllocation = map[string]string{ + "": "RangeAllocation is not a public type.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "range": "Range is string that identifies the range represented by 'data'.", + "data": "Data is a bit array containing all allocated addresses in the previous segment.", +} + +func (RangeAllocation) SwaggerDoc() map[string]string { + return map_RangeAllocation +} + +var map_ReplicationController = map[string]string{ + "": "ReplicationController represents the configuration of a replication controller.", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (ReplicationController) SwaggerDoc() map[string]string { + return map_ReplicationController +} + +var map_ReplicationControllerCondition = map[string]string{ + "": "ReplicationControllerCondition describes the state of a replication controller at a certain point.", + "type": "Type of replication controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicationControllerCondition) SwaggerDoc() map[string]string { + return map_ReplicationControllerCondition +} + +var map_ReplicationControllerList = map[string]string{ + "": "ReplicationControllerList is a collection of replication controllers.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", +} + +func (ReplicationControllerList) SwaggerDoc() map[string]string { + return map_ReplicationControllerList +} + +var map_ReplicationControllerSpec = map[string]string{ + "": "ReplicationControllerSpec is the specification of a replication controller.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", +} + +func (ReplicationControllerSpec) SwaggerDoc() map[string]string { + return map_ReplicationControllerSpec +} + +var map_ReplicationControllerStatus = map[string]string{ + "": "ReplicationControllerStatus represents the current status of a replication controller.", + "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.", + "readyReplicas": "The number of ready replicas for this replication controller.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed replication controller.", + "conditions": "Represents the latest available observations of a replication controller's current state.", +} + +func (ReplicationControllerStatus) SwaggerDoc() map[string]string { + return map_ReplicationControllerStatus +} + +var map_ResourceFieldSelector = map[string]string{ + "": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", + "containerName": "Container name: required for volumes, optional for env vars", + "resource": "Required: resource to select", + "divisor": "Specifies the output format of the exposed resources, defaults to \"1\"", +} + +func (ResourceFieldSelector) SwaggerDoc() map[string]string { + return map_ResourceFieldSelector +} + +var map_ResourceQuota = map[string]string{ + "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (ResourceQuota) SwaggerDoc() map[string]string { + return map_ResourceQuota +} + +var map_ResourceQuotaList = map[string]string{ + "": "ResourceQuotaList is a list of ResourceQuota items.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is a list of ResourceQuota objects. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md", +} + +func (ResourceQuotaList) SwaggerDoc() map[string]string { + return map_ResourceQuotaList +} + +var map_ResourceQuotaSpec = map[string]string{ + "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", + "hard": "Hard is the set of desired hard limits for each named resource. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md", + "scopes": "A collection of filters that must match each object tracked by a quota. 
If not specified, the quota matches all objects.", +} + +func (ResourceQuotaSpec) SwaggerDoc() map[string]string { + return map_ResourceQuotaSpec +} + +var map_ResourceQuotaStatus = map[string]string{ + "": "ResourceQuotaStatus defines the enforced hard limits and observed use.", + "hard": "Hard is the set of enforced hard limits for each named resource. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md", + "used": "Used is the current observed total usage of the resource in the namespace.", +} + +func (ResourceQuotaStatus) SwaggerDoc() map[string]string { + return map_ResourceQuotaStatus +} + +var map_ResourceRequirements = map[string]string{ + "": "ResourceRequirements describes the compute resource requirements.", + "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", +} + +func (ResourceRequirements) SwaggerDoc() map[string]string { + return map_ResourceRequirements +} + +var map_SELinuxOptions = map[string]string{ + "": "SELinuxOptions are the labels to be applied to the container", + "user": "User is a SELinux user label that applies to the container.", + "role": "Role is a SELinux role label that applies to the container.", + "type": "Type is a SELinux type label that applies to the container.", + "level": "Level is SELinux level label that applies to the container.", +} + +func (SELinuxOptions) SwaggerDoc() map[string]string { + return map_SELinuxOptions +} + +var map_ScaleIOVolumeSource = map[string]string{ + "": "ScaleIOVolumeSource represents a persistent ScaleIO volume", + "gateway": "The host address of the ScaleIO API Gateway.", + "system": "The name of the storage system as configured in ScaleIO.", + "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", + "protectionDomain": "The name of the Protection Domain for the configured storage (defaults to \"default\").", + "storagePool": "The Storage Pool associated with the protection domain (defaults to \"default\").", + "storageMode": "Indicates whether the storage for a volume should be thick or thin (defaults to \"thin\").", + "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (ScaleIOVolumeSource) SwaggerDoc() map[string]string { + return map_ScaleIOVolumeSource +} + +var map_Secret = map[string]string{ + "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", + "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", + "type": "Used to facilitate programmatic handling of secret data.", +} + +func (Secret) SwaggerDoc() map[string]string { + return map_Secret +} + +var map_SecretEnvSource = map[string]string{ + "": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", + "optional": "Specify whether the Secret must be defined", +} + +func (SecretEnvSource) SwaggerDoc() map[string]string { + return map_SecretEnvSource +} + +var map_SecretKeySelector = map[string]string{ + "": "SecretKeySelector selects a key of a Secret.", + "key": "The key of the secret to select from. Must be a valid secret key.", + "optional": "Specify whether the Secret or it's key must be defined", +} + +func (SecretKeySelector) SwaggerDoc() map[string]string { + return map_SecretKeySelector +} + +var map_SecretList = map[string]string{ + "": "SecretList is a list of Secret.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", +} + +func (SecretList) SwaggerDoc() map[string]string { + return map_SecretList +} + +var map_SecretProjection = map[string]string{ + "": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "optional": "Specify whether the Secret or its key must be defined", +} + +func (SecretProjection) SwaggerDoc() map[string]string { + return map_SecretProjection +} + +var map_SecretVolumeSource = map[string]string{ + "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", + "secretName": "Name of the secret in the pod's namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "Specify whether the Secret or it's keys must be defined", +} + +func (SecretVolumeSource) SwaggerDoc() map[string]string { + return map_SecretVolumeSource +} + +var map_SecurityContext = map[string]string{ + "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", + "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", + "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", + "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false.", +} + +func (SecurityContext) SwaggerDoc() map[string]string { + return map_SecurityContext +} + +var map_SerializedReference = map[string]string{ + "": "SerializedReference is a reference to serialized object.", + "reference": "The reference to an object in the system.", +} + +func (SerializedReference) SwaggerDoc() map[string]string { + return map_SerializedReference +} + +var map_Service = map[string]string{ + "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (Service) SwaggerDoc() map[string]string { + return map_Service +} + +var map_ServiceAccount = map[string]string{ + "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", + "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", + "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", +} + +func (ServiceAccount) SwaggerDoc() map[string]string { + return map_ServiceAccount +} + +var map_ServiceAccountList = map[string]string{ + "": "ServiceAccountList is a list of ServiceAccount objects", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", +} + +func (ServiceAccountList) SwaggerDoc() map[string]string { + return map_ServiceAccountList +} + +var map_ServiceList = map[string]string{ + "": "ServiceList holds a list of services.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of services", +} + +func (ServiceList) SwaggerDoc() map[string]string { + return map_ServiceList +} + +var map_ServicePort = map[string]string{ + "": "ServicePort contains information on service's port.", + "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", + "protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". Default is TCP.", + "port": "The port that will be exposed by this service.", + "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", + "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", +} + +func (ServicePort) SwaggerDoc() map[string]string { + return map_ServicePort +} + +var map_ServiceProxyOptions = map[string]string{ + "": "ServiceProxyOptions is the query options to a Service's proxy call.", + "path": "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.", +} + +func (ServiceProxyOptions) SwaggerDoc() map[string]string { + return map_ServiceProxyOptions +} + +var map_ServiceSpec = map[string]string{ + "": "ServiceSpec describes the attributes that a user creates on a service.", + "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", + "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services ", + "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. 
The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", + "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer. LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", + "loadBalancerSourceRanges": "If specified and supported by the platform, traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", + "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid DNS name and requires Type to be ExternalName.", + "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and NodePort type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", + "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only takes effect when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", +} + +func (ServiceSpec) SwaggerDoc() map[string]string { + return map_ServiceSpec +} + +var map_ServiceStatus = map[string]string{ + "": "ServiceStatus represents the current status of a service.", + "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.", +} + +func (ServiceStatus) SwaggerDoc() map[string]string { + return map_ServiceStatus +} + +var map_StorageOSPersistentVolumeSource = map[string]string{ + "": "Represents a StorageOS persistent volume resource.", + "volumeName": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretRef": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", +} + +func (StorageOSPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_StorageOSPersistentVolumeSource +} + +var map_StorageOSVolumeSource = map[string]string{ + "": "Represents a StorageOS persistent volume resource.", + "volumeName": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretRef": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", +} + +func (StorageOSVolumeSource) SwaggerDoc() map[string]string { + return map_StorageOSVolumeSource +} + +var map_Sysctl = map[string]string{ + "": "Sysctl defines a kernel parameter to be set", + "Name": "Name of a property to set", + "Value": "Value of a property to set", +} + +func (Sysctl) SwaggerDoc() map[string]string { + return map_Sysctl +} + +var map_TCPSocketAction = map[string]string{ + "": "TCPSocketAction describes an action based on opening a socket", + "port": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", + "host": "Optional: Host name to connect to, defaults to the pod IP.", +} + +func (TCPSocketAction) SwaggerDoc() map[string]string { + return map_TCPSocketAction +} + +var map_Taint = map[string]string{ + "": "The node this Taint is attached to has the effect \"effect\" on any pod that does not tolerate the Taint.", + "key": "Required. The taint key to be applied to a node.", + "value": "Required. The taint value corresponding to the taint key.", + "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", + "timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.", +} + +func (Taint) SwaggerDoc() map[string]string { + return map_Taint +} + +var map_Toleration = map[string]string{ + "": "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.", + "key": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "operator": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + "effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", +} + +func (Toleration) SwaggerDoc() map[string]string { + return map_Toleration +} + +var map_Volume = map[string]string{ + "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", +} + +func (Volume) SwaggerDoc() map[string]string { + return map_Volume +} + +var map_VolumeMount = map[string]string{ + "": "VolumeMount describes a mounting of a Volume within a container.", + "name": "This must match the Name of a Volume.", + "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", + "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", +} + +func (VolumeMount) SwaggerDoc() map[string]string { + return map_VolumeMount +} + +var map_VolumeProjection = map[string]string{ + "": "Projection that may be projected along with other supported volume types", + "secret": "information about the secret data to project", + "downwardAPI": "information about the downwardAPI data to project", + "configMap": "information about the configMap data to project", +} + +func (VolumeProjection) SwaggerDoc() map[string]string { + return map_VolumeProjection +} + +var map_VolumeSource = map[string]string{ + "": "Represents the source of a volume to mount. Only one of its members may be specified.", + "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "gitRepo": "GitRepo represents a git repository at a particular revision.", + "secret": "Secret represents a secret that should populate this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "ConfigMap represents a configMap that should populate this volume", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "Items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", +} + +func (VolumeSource) SwaggerDoc() map[string]string { + return map_VolumeSource +} + +var map_VsphereVirtualDiskVolumeSource = map[string]string{ + "": "Represents a vSphere volume resource.", + "volumePath": "Path that identifies vSphere volume vmdk", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + "storagePolicyName": "Storage Policy Based Management (SPBM) profile name.", + "storagePolicyID": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", +} + +func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string { + return map_VsphereVirtualDiskVolumeSource +} + +var map_WeightedPodAffinityTerm = map[string]string{ + "": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "podAffinityTerm": "Required. A pod affinity term, associated with the corresponding weight.", +} + +func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { + return map_WeightedPodAffinityTerm +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go new file mode 100644 index 000000000..2fdecd13f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go @@ -0,0 +1,5168 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + api "k8s.io/client-go/pkg/api" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, + Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, + Convert_v1_Affinity_To_api_Affinity, + Convert_api_Affinity_To_v1_Affinity, + Convert_v1_AttachedVolume_To_api_AttachedVolume, + Convert_api_AttachedVolume_To_v1_AttachedVolume, + Convert_v1_AvoidPods_To_api_AvoidPods, + Convert_api_AvoidPods_To_v1_AvoidPods, + Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource, + Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource, + Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, + Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, + Convert_v1_Binding_To_api_Binding, + Convert_api_Binding_To_v1_Binding, + Convert_v1_Capabilities_To_api_Capabilities, + Convert_api_Capabilities_To_v1_Capabilities, + Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, + Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, + Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, + Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, + Convert_v1_ComponentCondition_To_api_ComponentCondition, + Convert_api_ComponentCondition_To_v1_ComponentCondition, + Convert_v1_ComponentStatus_To_api_ComponentStatus, + Convert_api_ComponentStatus_To_v1_ComponentStatus, + Convert_v1_ComponentStatusList_To_api_ComponentStatusList, + Convert_api_ComponentStatusList_To_v1_ComponentStatusList, + Convert_v1_ConfigMap_To_api_ConfigMap, + Convert_api_ConfigMap_To_v1_ConfigMap, + Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource, + Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, + Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, + Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, + Convert_v1_ConfigMapList_To_api_ConfigMapList, + Convert_api_ConfigMapList_To_v1_ConfigMapList, + Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection, + Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection, + Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, + Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, + Convert_v1_Container_To_api_Container, + Convert_api_Container_To_v1_Container, + Convert_v1_ContainerImage_To_api_ContainerImage, + Convert_api_ContainerImage_To_v1_ContainerImage, + Convert_v1_ContainerPort_To_api_ContainerPort, + Convert_api_ContainerPort_To_v1_ContainerPort, + Convert_v1_ContainerState_To_api_ContainerState, + Convert_api_ContainerState_To_v1_ContainerState, + Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning, + Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning, + Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, + Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, + Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, + Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, + Convert_v1_ContainerStatus_To_api_ContainerStatus, + Convert_api_ContainerStatus_To_v1_ContainerStatus, + Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint, + Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint, + Convert_v1_DeleteOptions_To_api_DeleteOptions, + Convert_api_DeleteOptions_To_v1_DeleteOptions, + Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection, + Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection, + Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, + 
Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, + Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, + Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, + Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, + Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, + Convert_v1_EndpointAddress_To_api_EndpointAddress, + Convert_api_EndpointAddress_To_v1_EndpointAddress, + Convert_v1_EndpointPort_To_api_EndpointPort, + Convert_api_EndpointPort_To_v1_EndpointPort, + Convert_v1_EndpointSubset_To_api_EndpointSubset, + Convert_api_EndpointSubset_To_v1_EndpointSubset, + Convert_v1_Endpoints_To_api_Endpoints, + Convert_api_Endpoints_To_v1_Endpoints, + Convert_v1_EndpointsList_To_api_EndpointsList, + Convert_api_EndpointsList_To_v1_EndpointsList, + Convert_v1_EnvFromSource_To_api_EnvFromSource, + Convert_api_EnvFromSource_To_v1_EnvFromSource, + Convert_v1_EnvVar_To_api_EnvVar, + Convert_api_EnvVar_To_v1_EnvVar, + Convert_v1_EnvVarSource_To_api_EnvVarSource, + Convert_api_EnvVarSource_To_v1_EnvVarSource, + Convert_v1_Event_To_api_Event, + Convert_api_Event_To_v1_Event, + Convert_v1_EventList_To_api_EventList, + Convert_api_EventList_To_v1_EventList, + Convert_v1_EventSource_To_api_EventSource, + Convert_api_EventSource_To_v1_EventSource, + Convert_v1_ExecAction_To_api_ExecAction, + Convert_api_ExecAction_To_v1_ExecAction, + Convert_v1_FCVolumeSource_To_api_FCVolumeSource, + Convert_api_FCVolumeSource_To_v1_FCVolumeSource, + Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource, + Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource, + Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, + Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, + Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, + Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, + Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, + Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, + Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, + Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, + Convert_v1_HTTPGetAction_To_api_HTTPGetAction, + Convert_api_HTTPGetAction_To_v1_HTTPGetAction, + Convert_v1_HTTPHeader_To_api_HTTPHeader, + Convert_api_HTTPHeader_To_v1_HTTPHeader, + Convert_v1_Handler_To_api_Handler, + Convert_api_Handler_To_v1_Handler, + Convert_v1_HostAlias_To_api_HostAlias, + Convert_api_HostAlias_To_v1_HostAlias, + Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, + Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, + Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, + Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, + Convert_v1_KeyToPath_To_api_KeyToPath, + Convert_api_KeyToPath_To_v1_KeyToPath, + Convert_v1_Lifecycle_To_api_Lifecycle, + Convert_api_Lifecycle_To_v1_Lifecycle, + Convert_v1_LimitRange_To_api_LimitRange, + Convert_api_LimitRange_To_v1_LimitRange, + Convert_v1_LimitRangeItem_To_api_LimitRangeItem, + Convert_api_LimitRangeItem_To_v1_LimitRangeItem, + Convert_v1_LimitRangeList_To_api_LimitRangeList, + Convert_api_LimitRangeList_To_v1_LimitRangeList, + Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec, + Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec, + Convert_v1_List_To_api_List, + Convert_api_List_To_v1_List, + Convert_v1_ListOptions_To_api_ListOptions, + Convert_api_ListOptions_To_v1_ListOptions, + Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, + Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, + 
Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, + Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, + Convert_v1_LocalObjectReference_To_api_LocalObjectReference, + Convert_api_LocalObjectReference_To_v1_LocalObjectReference, + Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource, + Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource, + Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource, + Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource, + Convert_v1_Namespace_To_api_Namespace, + Convert_api_Namespace_To_v1_Namespace, + Convert_v1_NamespaceList_To_api_NamespaceList, + Convert_api_NamespaceList_To_v1_NamespaceList, + Convert_v1_NamespaceSpec_To_api_NamespaceSpec, + Convert_api_NamespaceSpec_To_v1_NamespaceSpec, + Convert_v1_NamespaceStatus_To_api_NamespaceStatus, + Convert_api_NamespaceStatus_To_v1_NamespaceStatus, + Convert_v1_Node_To_api_Node, + Convert_api_Node_To_v1_Node, + Convert_v1_NodeAddress_To_api_NodeAddress, + Convert_api_NodeAddress_To_v1_NodeAddress, + Convert_v1_NodeAffinity_To_api_NodeAffinity, + Convert_api_NodeAffinity_To_v1_NodeAffinity, + Convert_v1_NodeCondition_To_api_NodeCondition, + Convert_api_NodeCondition_To_v1_NodeCondition, + Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints, + Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, + Convert_v1_NodeList_To_api_NodeList, + Convert_api_NodeList_To_v1_NodeList, + Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions, + Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions, + Convert_v1_NodeResources_To_api_NodeResources, + Convert_api_NodeResources_To_v1_NodeResources, + Convert_v1_NodeSelector_To_api_NodeSelector, + Convert_api_NodeSelector_To_v1_NodeSelector, + Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement, + Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, + Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm, + Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm, + Convert_v1_NodeSpec_To_api_NodeSpec, + Convert_api_NodeSpec_To_v1_NodeSpec, + Convert_v1_NodeStatus_To_api_NodeStatus, + Convert_api_NodeStatus_To_v1_NodeStatus, + Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo, + Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo, + Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, + Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, + Convert_v1_ObjectMeta_To_api_ObjectMeta, + Convert_api_ObjectMeta_To_v1_ObjectMeta, + Convert_v1_ObjectReference_To_api_ObjectReference, + Convert_api_ObjectReference_To_v1_ObjectReference, + Convert_v1_PersistentVolume_To_api_PersistentVolume, + Convert_api_PersistentVolume_To_v1_PersistentVolume, + Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, + Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, + Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, + Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, + Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, + Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, + Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, + Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, + Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, + Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, + Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList, + Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList, + 
Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, + Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, + Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, + Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, + Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, + Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, + Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource, + Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource, + Convert_v1_Pod_To_api_Pod, + Convert_api_Pod_To_v1_Pod, + Convert_v1_PodAffinity_To_api_PodAffinity, + Convert_api_PodAffinity_To_v1_PodAffinity, + Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm, + Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm, + Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity, + Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity, + Convert_v1_PodAttachOptions_To_api_PodAttachOptions, + Convert_api_PodAttachOptions_To_v1_PodAttachOptions, + Convert_v1_PodCondition_To_api_PodCondition, + Convert_api_PodCondition_To_v1_PodCondition, + Convert_v1_PodExecOptions_To_api_PodExecOptions, + Convert_api_PodExecOptions_To_v1_PodExecOptions, + Convert_v1_PodList_To_api_PodList, + Convert_api_PodList_To_v1_PodList, + Convert_v1_PodLogOptions_To_api_PodLogOptions, + Convert_api_PodLogOptions_To_v1_PodLogOptions, + Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions, + Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions, + Convert_v1_PodProxyOptions_To_api_PodProxyOptions, + Convert_api_PodProxyOptions_To_v1_PodProxyOptions, + Convert_v1_PodSecurityContext_To_api_PodSecurityContext, + Convert_api_PodSecurityContext_To_v1_PodSecurityContext, + Convert_v1_PodSignature_To_api_PodSignature, + Convert_api_PodSignature_To_v1_PodSignature, + Convert_v1_PodSpec_To_api_PodSpec, + Convert_api_PodSpec_To_v1_PodSpec, + Convert_v1_PodStatus_To_api_PodStatus, + Convert_api_PodStatus_To_v1_PodStatus, + Convert_v1_PodStatusResult_To_api_PodStatusResult, + Convert_api_PodStatusResult_To_v1_PodStatusResult, + Convert_v1_PodTemplate_To_api_PodTemplate, + Convert_api_PodTemplate_To_v1_PodTemplate, + Convert_v1_PodTemplateList_To_api_PodTemplateList, + Convert_api_PodTemplateList_To_v1_PodTemplateList, + Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, + Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, + Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource, + Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource, + Convert_v1_Preconditions_To_api_Preconditions, + Convert_api_Preconditions_To_v1_Preconditions, + Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry, + Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry, + Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm, + Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, + Convert_v1_Probe_To_api_Probe, + Convert_api_Probe_To_v1_Probe, + Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource, + Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, + Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource, + Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, + Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, + Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource, + Convert_v1_RangeAllocation_To_api_RangeAllocation, + Convert_api_RangeAllocation_To_v1_RangeAllocation, + Convert_v1_ReplicationController_To_api_ReplicationController, + 
Convert_api_ReplicationController_To_v1_ReplicationController, + Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition, + Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition, + Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList, + Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList, + Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, + Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, + Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, + Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector, + Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector, + Convert_v1_ResourceQuota_To_api_ResourceQuota, + Convert_api_ResourceQuota_To_v1_ResourceQuota, + Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList, + Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList, + Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, + Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, + Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, + Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, + Convert_v1_ResourceRequirements_To_api_ResourceRequirements, + Convert_api_ResourceRequirements_To_v1_ResourceRequirements, + Convert_v1_SELinuxOptions_To_api_SELinuxOptions, + Convert_api_SELinuxOptions_To_v1_SELinuxOptions, + Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource, + Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, + Convert_v1_Secret_To_api_Secret, + Convert_api_Secret_To_v1_Secret, + Convert_v1_SecretEnvSource_To_api_SecretEnvSource, + Convert_api_SecretEnvSource_To_v1_SecretEnvSource, + Convert_v1_SecretKeySelector_To_api_SecretKeySelector, + Convert_api_SecretKeySelector_To_v1_SecretKeySelector, + Convert_v1_SecretList_To_api_SecretList, + Convert_api_SecretList_To_v1_SecretList, + Convert_v1_SecretProjection_To_api_SecretProjection, + Convert_api_SecretProjection_To_v1_SecretProjection, + Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, + Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, + Convert_v1_SecurityContext_To_api_SecurityContext, + Convert_api_SecurityContext_To_v1_SecurityContext, + Convert_v1_SerializedReference_To_api_SerializedReference, + Convert_api_SerializedReference_To_v1_SerializedReference, + Convert_v1_Service_To_api_Service, + Convert_api_Service_To_v1_Service, + Convert_v1_ServiceAccount_To_api_ServiceAccount, + Convert_api_ServiceAccount_To_v1_ServiceAccount, + Convert_v1_ServiceAccountList_To_api_ServiceAccountList, + Convert_api_ServiceAccountList_To_v1_ServiceAccountList, + Convert_v1_ServiceList_To_api_ServiceList, + Convert_api_ServiceList_To_v1_ServiceList, + Convert_v1_ServicePort_To_api_ServicePort, + Convert_api_ServicePort_To_v1_ServicePort, + Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions, + Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions, + Convert_v1_ServiceSpec_To_api_ServiceSpec, + Convert_api_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_ServiceStatus_To_api_ServiceStatus, + Convert_api_ServiceStatus_To_v1_ServiceStatus, + Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource, + Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource, + Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource, + Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource, + Convert_v1_Sysctl_To_api_Sysctl, 
+ Convert_api_Sysctl_To_v1_Sysctl, + Convert_v1_TCPSocketAction_To_api_TCPSocketAction, + Convert_api_TCPSocketAction_To_v1_TCPSocketAction, + Convert_v1_Taint_To_api_Taint, + Convert_api_Taint_To_v1_Taint, + Convert_v1_Toleration_To_api_Toleration, + Convert_api_Toleration_To_v1_Toleration, + Convert_v1_Volume_To_api_Volume, + Convert_api_Volume_To_v1_Volume, + Convert_v1_VolumeMount_To_api_VolumeMount, + Convert_api_VolumeMount_To_v1_VolumeMount, + Convert_v1_VolumeProjection_To_api_VolumeProjection, + Convert_api_VolumeProjection_To_v1_VolumeProjection, + Convert_v1_VolumeSource_To_api_VolumeSource, + Convert_api_VolumeSource_To_v1_VolumeSource, + Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource, + Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, + Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm, + Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, + ) +} + +func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. +func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. +func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { + out.NodeAffinity = (*api.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*api.PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*api.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +// Convert_v1_Affinity_To_api_Affinity is an autogenerated conversion function. +func Convert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { + return autoConvert_v1_Affinity_To_api_Affinity(in, out, s) +} + +func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { + out.NodeAffinity = (*NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +// Convert_api_Affinity_To_v1_Affinity is an autogenerated conversion function. 
+func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { + return autoConvert_api_Affinity_To_v1_Affinity(in, out, s) +} + +func autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { + out.Name = api.UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +// Convert_v1_AttachedVolume_To_api_AttachedVolume is an autogenerated conversion function. +func Convert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { + return autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in, out, s) +} + +func autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error { + out.Name = UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +// Convert_api_AttachedVolume_To_v1_AttachedVolume is an autogenerated conversion function. +func Convert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error { + return autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in, out, s) +} + +func autoConvert_v1_AvoidPods_To_api_AvoidPods(in *AvoidPods, out *api.AvoidPods, s conversion.Scope) error { + out.PreferAvoidPods = *(*[]api.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) + return nil +} + +// Convert_v1_AvoidPods_To_api_AvoidPods is an autogenerated conversion function. +func Convert_v1_AvoidPods_To_api_AvoidPods(in *AvoidPods, out *api.AvoidPods, s conversion.Scope) error { + return autoConvert_v1_AvoidPods_To_api_AvoidPods(in, out, s) +} + +func autoConvert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *AvoidPods, s conversion.Scope) error { + out.PreferAvoidPods = *(*[]PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) + return nil +} + +// Convert_api_AvoidPods_To_v1_AvoidPods is an autogenerated conversion function. +func Convert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *AvoidPods, s conversion.Scope) error { + return autoConvert_api_AvoidPods_To_v1_AvoidPods(in, out, s) +} + +func autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { + out.DiskName = in.DiskName + out.DataDiskURI = in.DataDiskURI + out.CachingMode = (*api.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) + out.FSType = (*string)(unsafe.Pointer(in.FSType)) + out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) + out.Kind = (*api.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) + return nil +} + +// Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource is an autogenerated conversion function. 
+func Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *AzureDiskVolumeSource, s conversion.Scope) error { + out.DiskName = in.DiskName + out.DataDiskURI = in.DataDiskURI + out.CachingMode = (*AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) + out.FSType = (*string)(unsafe.Pointer(in.FSType)) + out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) + out.Kind = (*AzureDataDiskKind)(unsafe.Pointer(in.Kind)) + return nil +} + +// Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource is an autogenerated conversion function. +func Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource is an autogenerated conversion function. +func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource is an autogenerated conversion function. +func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Binding_To_api_Binding is an autogenerated conversion function. +func Convert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { + return autoConvert_v1_Binding_To_api_Binding(in, out, s) +} + +func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_api_Binding_To_v1_Binding is an autogenerated conversion function. 
+func Convert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { + return autoConvert_api_Binding_To_v1_Binding(in, out, s) +} + +func autoConvert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { + out.Add = *(*[]api.Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]api.Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +// Convert_v1_Capabilities_To_api_Capabilities is an autogenerated conversion function. +func Convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { + return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s) +} + +func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { + out.Add = *(*[]Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +// Convert_api_Capabilities_To_v1_Capabilities is an autogenerated conversion function. +func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { + return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) +} + +func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource is an autogenerated conversion function. +func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s) +} + +func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { + if in.Monitors == nil { + out.Monitors = make([]string, 0) + } else { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + } + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource is an autogenerated conversion function. +func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) +} + +func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource is an autogenerated conversion function. 
+func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s) +} + +func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource is an autogenerated conversion function. +func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { + return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) +} + +func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { + out.Type = api.ComponentConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +// Convert_v1_ComponentCondition_To_api_ComponentCondition is an autogenerated conversion function. +func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { + return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s) +} + +func autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { + out.Type = ComponentConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +// Convert_api_ComponentCondition_To_v1_ComponentCondition is an autogenerated conversion function. +func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { + return autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in, out, s) +} + +func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]api.ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_ComponentStatus_To_api_ComponentStatus is an autogenerated conversion function. +func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { + return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s) +} + +func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_api_ComponentStatus_To_v1_ComponentStatus is an autogenerated conversion function. 
+func Convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { + return autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in, out, s) +} + +func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ComponentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ComponentStatusList_To_api_ComponentStatusList is an autogenerated conversion function. +func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { + return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s) +} + +func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ComponentStatus, 0) + } else { + out.Items = *(*[]ComponentStatus)(unsafe.Pointer(&in.Items)) + } + return nil +} + +// Convert_api_ComponentStatusList_To_v1_ComponentStatusList is an autogenerated conversion function. +func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { + return autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) +} + +func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1_ConfigMap_To_api_ConfigMap is an autogenerated conversion function. +func Convert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { + return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s) +} + +func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_api_ConfigMap_To_v1_ConfigMap is an autogenerated conversion function. +func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { + return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s) +} + +func autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource is an autogenerated conversion function. 
+func Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource is an autogenerated conversion function. +func Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector is an autogenerated conversion function. +func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector is an autogenerated conversion function. +func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ConfigMapList_To_api_ConfigMapList is an autogenerated conversion function. +func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { + return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s) +} + +func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ConfigMap, 0) + } else { + out.Items = *(*[]ConfigMap)(unsafe.Pointer(&in.Items)) + } + return nil +} + +// Convert_api_ConfigMapList_To_v1_ConfigMapList is an autogenerated conversion function. 
+func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { + return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) +} + +func autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection is an autogenerated conversion function. +func Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in, out, s) +} + +func autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection is an autogenerated conversion function. +func Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error { + return autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) +} + +func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource is an autogenerated conversion function. +func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource is an autogenerated conversion function. 
+func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]api.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.LivenessProbe = (*api.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*api.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*api.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = api.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) + out.SecurityContext = (*api.SecurityContext)(unsafe.Pointer(in.SecurityContext)) + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +// Convert_v1_Container_To_api_Container is an autogenerated conversion function. +func Convert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { + return autoConvert_v1_Container_To_api_Container(in, out, s) +} + +func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.LivenessProbe = (*Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy) + out.SecurityContext = (*SecurityContext)(unsafe.Pointer(in.SecurityContext)) + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +// Convert_api_Container_To_v1_Container is an autogenerated conversion function. 
+func Convert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { + return autoConvert_api_Container_To_v1_Container(in, out, s) +} + +func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +// Convert_v1_ContainerImage_To_api_ContainerImage is an autogenerated conversion function. +func Convert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { + return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s) +} + +func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { + if in.Names == nil { + out.Names = make([]string, 0) + } else { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + } + out.SizeBytes = in.SizeBytes + return nil +} + +// Convert_api_ContainerImage_To_v1_ContainerImage is an autogenerated conversion function. +func Convert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { + return autoConvert_api_ContainerImage_To_v1_ContainerImage(in, out, s) +} + +func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = api.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +// Convert_v1_ContainerPort_To_api_ContainerPort is an autogenerated conversion function. +func Convert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { + return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s) +} + +func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +// Convert_api_ContainerPort_To_v1_ContainerPort is an autogenerated conversion function. +func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { + return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) +} + +func autoConvert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + out.Waiting = (*api.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*api.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*api.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +// Convert_v1_ContainerState_To_api_ContainerState is an autogenerated conversion function. 
+func Convert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s) +} + +func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { + out.Waiting = (*ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +// Convert_api_ContainerState_To_v1_ContainerState is an autogenerated conversion function. +func Convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { + return autoConvert_api_ContainerState_To_v1_ContainerState(in, out, s) +} + +func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +// Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning is an autogenerated conversion function. +func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s) +} + +func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +// Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning is an autogenerated conversion function. +func Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { + return autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) +} + +func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +// Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated is an autogenerated conversion function. +func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s) +} + +func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +// Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated is an autogenerated conversion function. 
+func Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) +} + +func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting is an autogenerated conversion function. +func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s) +} + +func autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting is an autogenerated conversion function. +func Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) +} + +func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +// Convert_v1_ContainerStatus_To_api_ContainerStatus is an autogenerated conversion function. +func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { + return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s) +} + +func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_api_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +// Convert_api_ContainerStatus_To_v1_ContainerStatus is an autogenerated conversion function. +func Convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { + return autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in, out, s) +} + +func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +// Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint is an autogenerated conversion function. 
+func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s) +} + +func autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +// Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint is an autogenerated conversion function. +func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { + return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) +} + +func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*api.Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*api.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +// Convert_v1_DeleteOptions_To_api_DeleteOptions is an autogenerated conversion function. +func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { + return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s) +} + +func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +// Convert_api_DeleteOptions_To_v1_DeleteOptions is an autogenerated conversion function. +func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { + return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) +} + +func autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection is an autogenerated conversion function. +func Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in, out, s) +} + +func autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection is an autogenerated conversion function. 
+func Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile is an autogenerated conversion function. +func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile is an autogenerated conversion function. +func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource is an autogenerated conversion function. +func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource is an autogenerated conversion function. +func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = api.StorageMedium(in.Medium) + out.SizeLimit = in.SizeLimit + return nil +} + +// Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource is an autogenerated conversion function. 
+func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = StorageMedium(in.Medium) + out.SizeLimit = in.SizeLimit + return nil +} + +// Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource is an autogenerated conversion function. +func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*api.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +// Convert_v1_EndpointAddress_To_api_EndpointAddress is an autogenerated conversion function. +func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { + return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s) +} + +func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +// Convert_api_EndpointAddress_To_v1_EndpointAddress is an autogenerated conversion function. +func Convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { + return autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in, out, s) +} + +func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = api.Protocol(in.Protocol) + return nil +} + +// Convert_v1_EndpointPort_To_api_EndpointPort is an autogenerated conversion function. +func Convert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { + return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s) +} + +func autoConvert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = Protocol(in.Protocol) + return nil +} + +// Convert_api_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function. 
+func Convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { + return autoConvert_api_EndpointPort_To_v1_EndpointPort(in, out, s) +} + +func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]api.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1_EndpointSubset_To_api_EndpointSubset is an autogenerated conversion function. +func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { + return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s) +} + +func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_api_EndpointSubset_To_v1_EndpointSubset is an autogenerated conversion function. +func Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { + return autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in, out, s) +} + +func autoConvert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]api.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +// Convert_v1_Endpoints_To_api_Endpoints is an autogenerated conversion function. +func Convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { + return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s) +} + +func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subsets == nil { + out.Subsets = make([]EndpointSubset, 0) + } else { + out.Subsets = *(*[]EndpointSubset)(unsafe.Pointer(&in.Subsets)) + } + return nil +} + +// Convert_api_Endpoints_To_v1_Endpoints is an autogenerated conversion function. +func Convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { + return autoConvert_api_Endpoints_To_v1_Endpoints(in, out, s) +} + +func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EndpointsList_To_api_EndpointsList is an autogenerated conversion function. +func Convert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { + return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s) +} + +func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Endpoints, 0) + } else { + out.Items = *(*[]Endpoints)(unsafe.Pointer(&in.Items)) + } + return nil +} + +// Convert_api_EndpointsList_To_v1_EndpointsList is an autogenerated conversion function. 
+func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { + return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s) +} + +func autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in *EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*api.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*api.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_EnvFromSource_To_api_EnvFromSource is an autogenerated conversion function. +func Convert_v1_EnvFromSource_To_api_EnvFromSource(in *EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { + return autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in, out, s) +} + +func autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_api_EnvFromSource_To_v1_EnvFromSource is an autogenerated conversion function. +func Convert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *EnvFromSource, s conversion.Scope) error { + return autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in, out, s) +} + +func autoConvert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*api.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +// Convert_v1_EnvVar_To_api_EnvVar is an autogenerated conversion function. +func Convert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { + return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s) +} + +func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +// Convert_api_EnvVar_To_v1_EnvVar is an autogenerated conversion function. +func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { + return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) +} + +func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*api.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*api.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +// Convert_v1_EnvVarSource_To_api_EnvVarSource is an autogenerated conversion function. 
+func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { + return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s) +} + +func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +// Convert_api_EnvVarSource_To_v1_EnvVarSource is an autogenerated conversion function. +func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { + return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) +} + +func autoConvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + return nil +} + +// Convert_v1_Event_To_api_Event is an autogenerated conversion function. +func Convert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { + return autoConvert_v1_Event_To_api_Event(in, out, s) +} + +func autoConvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + return nil +} + +// Convert_api_Event_To_v1_Event is an autogenerated conversion function. +func Convert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { + return autoConvert_api_Event_To_v1_Event(in, out, s) +} + +func autoConvert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EventList_To_api_EventList is an autogenerated conversion function. +func Convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { + return autoConvert_v1_EventList_To_api_EventList(in, out, s) +} + +func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Event, 0) + } else { + out.Items = *(*[]Event)(unsafe.Pointer(&in.Items)) + } + return nil +} + +// Convert_api_EventList_To_v1_EventList is an autogenerated conversion function. 
+func Convert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { + return autoConvert_api_EventList_To_v1_EventList(in, out, s) +} + +func autoConvert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +// Convert_v1_EventSource_To_api_EventSource is an autogenerated conversion function. +func Convert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { + return autoConvert_v1_EventSource_To_api_EventSource(in, out, s) +} + +func autoConvert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +// Convert_api_EventSource_To_v1_EventSource is an autogenerated conversion function. +func Convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { + return autoConvert_api_EventSource_To_v1_EventSource(in, out, s) +} + +func autoConvert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_v1_ExecAction_To_api_ExecAction is an autogenerated conversion function. +func Convert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { + return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s) +} + +func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_api_ExecAction_To_v1_ExecAction is an autogenerated conversion function. +func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { + return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) +} + +func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_FCVolumeSource_To_api_FCVolumeSource is an autogenerated conversion function. +func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s) +} + +func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { + if in.TargetWWNs == nil { + out.TargetWWNs = make([]string, 0) + } else { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + } + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_FCVolumeSource_To_v1_FCVolumeSource is an autogenerated conversion function. 
+func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { + return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) +} + +func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource is an autogenerated conversion function. +func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s) +} + +func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource is an autogenerated conversion function. +func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { + return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) +} + +func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +// Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource is an autogenerated conversion function. +func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s) +} + +func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +// Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource is an autogenerated conversion function. +func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) +} + +func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource is an autogenerated conversion function. 
+func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +// Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource is an autogenerated conversion function. +func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +// Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource is an autogenerated conversion function. +func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource is an autogenerated conversion function. +func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource is an autogenerated conversion function. 
+func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = api.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]api.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +// Convert_v1_HTTPGetAction_To_api_HTTPGetAction is an autogenerated conversion function. +func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { + return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s) +} + +func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +// Convert_api_HTTPGetAction_To_v1_HTTPGetAction is an autogenerated conversion function. +func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { + return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) +} + +func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_HTTPHeader_To_api_HTTPHeader is an autogenerated conversion function. +func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { + return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s) +} + +func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_api_HTTPHeader_To_v1_HTTPHeader is an autogenerated conversion function. +func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { + return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) +} + +func autoConvert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { + out.Exec = (*api.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*api.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*api.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +// Convert_v1_Handler_To_api_Handler is an autogenerated conversion function. +func Convert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { + return autoConvert_v1_Handler_To_api_Handler(in, out, s) +} + +func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { + out.Exec = (*ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +// Convert_api_Handler_To_v1_Handler is an autogenerated conversion function. 
+func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { + return autoConvert_api_Handler_To_v1_Handler(in, out, s) +} + +func autoConvert_v1_HostAlias_To_api_HostAlias(in *HostAlias, out *api.HostAlias, s conversion.Scope) error { + out.IP = in.IP + out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) + return nil +} + +// Convert_v1_HostAlias_To_api_HostAlias is an autogenerated conversion function. +func Convert_v1_HostAlias_To_api_HostAlias(in *HostAlias, out *api.HostAlias, s conversion.Scope) error { + return autoConvert_v1_HostAlias_To_api_HostAlias(in, out, s) +} + +func autoConvert_api_HostAlias_To_v1_HostAlias(in *api.HostAlias, out *HostAlias, s conversion.Scope) error { + out.IP = in.IP + out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) + return nil +} + +// Convert_api_HostAlias_To_v1_HostAlias is an autogenerated conversion function. +func Convert_api_HostAlias_To_v1_HostAlias(in *api.HostAlias, out *HostAlias, s conversion.Scope) error { + return autoConvert_api_HostAlias_To_v1_HostAlias(in, out, s) +} + +func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource is an autogenerated conversion function. +func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s) +} + +func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource is an autogenerated conversion function. +func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource is an autogenerated conversion function. +func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1_KeyToPath_To_api_KeyToPath is an autogenerated conversion function. +func Convert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { + return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s) +} + +func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_api_KeyToPath_To_v1_KeyToPath is an autogenerated conversion function. +func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { + return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) +} + +func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { + out.PostStart = (*api.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*api.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +// Convert_v1_Lifecycle_To_api_Lifecycle is an autogenerated conversion function. +func Convert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { + return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s) +} + +func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { + out.PostStart = (*Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +// Convert_api_Lifecycle_To_v1_Lifecycle is an autogenerated conversion function. +func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { + return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) +} + +func autoConvert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1_LimitRange_To_api_LimitRange is an autogenerated conversion function. 
+func Convert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { + return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s) +} + +func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_api_LimitRange_To_v1_LimitRange is an autogenerated conversion function. +func Convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { + return autoConvert_api_LimitRange_To_v1_LimitRange(in, out, s) +} + +func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { + out.Type = api.LimitType(in.Type) + out.Max = *(*api.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*api.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*api.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*api.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*api.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +// Convert_v1_LimitRangeItem_To_api_LimitRangeItem is an autogenerated conversion function. +func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { + return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s) +} + +func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { + out.Type = LimitType(in.Type) + out.Max = *(*ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +// Convert_api_LimitRangeItem_To_v1_LimitRangeItem is an autogenerated conversion function. +func Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { + return autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) +} + +func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_LimitRangeList_To_api_LimitRangeList is an autogenerated conversion function. +func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { + return autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s) +} + +func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]LimitRange, 0) + } else { + out.Items = *(*[]LimitRange)(unsafe.Pointer(&in.Items)) + } + return nil +} + +// Convert_api_LimitRangeList_To_v1_LimitRangeList is an autogenerated conversion function. 
+func Convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { + return autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in, out, s) +} + +func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]api.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +// Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec is an autogenerated conversion function. +func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s) +} + +func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { + if in.Limits == nil { + out.Limits = make([]LimitRangeItem, 0) + } else { + out.Limits = *(*[]LimitRangeItem)(unsafe.Pointer(&in.Limits)) + } + return nil +} + +// Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec is an autogenerated conversion function. +func Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { + return autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) +} + +func autoConvert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_List_To_api_List is an autogenerated conversion function. +func Convert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { + return autoConvert_v1_List_To_api_List(in, out, s) +} + +func autoConvert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]runtime.RawExtension, 0) + } + return nil +} + +// Convert_api_List_To_v1_List is an autogenerated conversion function. +func Convert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { + return autoConvert_api_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_v1_ListOptions_To_api_ListOptions is an autogenerated conversion function. 
+func Convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s) +} + +func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_api_ListOptions_To_v1_ListOptions is an autogenerated conversion function. +func Convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { + return autoConvert_api_ListOptions_To_v1_ListOptions(in, out, s) +} + +func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +// Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress is an autogenerated conversion function. +func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s) +} + +func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +// Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress is an autogenerated conversion function. +func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) +} + +func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]api.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +// Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus is an autogenerated conversion function. +func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s) +} + +func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +// Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function. 
+func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) +} + +func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_v1_LocalObjectReference_To_api_LocalObjectReference is an autogenerated conversion function. +func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { + return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s) +} + +func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_api_LocalObjectReference_To_v1_LocalObjectReference is an autogenerated conversion function. +func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { + return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) +} + +func autoConvert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in *LocalVolumeSource, out *api.LocalVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource is an autogenerated conversion function. +func Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in *LocalVolumeSource, out *api.LocalVolumeSource, s conversion.Scope) error { + return autoConvert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in, out, s) +} + +func autoConvert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in *api.LocalVolumeSource, out *LocalVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource is an autogenerated conversion function. +func Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in *api.LocalVolumeSource, out *LocalVolumeSource, s conversion.Scope) error { + return autoConvert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s) +} + +func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource is an autogenerated conversion function. +func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s) +} + +func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource is an autogenerated conversion function. 
+func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { + return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) +} + +func autoConvert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Namespace_To_api_Namespace is an autogenerated conversion function. +func Convert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { + return autoConvert_v1_Namespace_To_api_Namespace(in, out, s) +} + +func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_api_Namespace_To_v1_Namespace is an autogenerated conversion function. +func Convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { + return autoConvert_api_Namespace_To_v1_Namespace(in, out, s) +} + +func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_NamespaceList_To_api_NamespaceList is an autogenerated conversion function. +func Convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { + return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s) +} + +func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Namespace, 0) + } else { + out.Items = *(*[]Namespace)(unsafe.Pointer(&in.Items)) + } + return nil +} + +// Convert_api_NamespaceList_To_v1_NamespaceList is an autogenerated conversion function. +func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { + return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s) +} + +func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]api.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +// Convert_v1_NamespaceSpec_To_api_NamespaceSpec is an autogenerated conversion function. +func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { + return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s) +} + +func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +// Convert_api_NamespaceSpec_To_v1_NamespaceSpec is an autogenerated conversion function. 
+func Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { + return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) +} + +func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { + out.Phase = api.NamespacePhase(in.Phase) + return nil +} + +// Convert_v1_NamespaceStatus_To_api_NamespaceStatus is an autogenerated conversion function. +func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { + return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s) +} + +func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { + out.Phase = NamespacePhase(in.Phase) + return nil +} + +// Convert_api_NamespaceStatus_To_v1_NamespaceStatus is an autogenerated conversion function. +func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { + return autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) +} + +func autoConvert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Node_To_api_Node is an autogenerated conversion function. +func Convert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { + return autoConvert_v1_Node_To_api_Node(in, out, s) +} + +func autoConvert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_api_Node_To_v1_Node is an autogenerated conversion function. +func Convert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { + return autoConvert_api_Node_To_v1_Node(in, out, s) +} + +func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { + out.Type = api.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +// Convert_v1_NodeAddress_To_api_NodeAddress is an autogenerated conversion function. +func Convert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { + return autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s) +} + +func autoConvert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { + out.Type = NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +// Convert_api_NodeAddress_To_v1_NodeAddress is an autogenerated conversion function. 
+func Convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { + return autoConvert_api_NodeAddress_To_v1_NodeAddress(in, out, s) +} + +func autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*api.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_NodeAffinity_To_api_NodeAffinity is an autogenerated conversion function. +func Convert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { + return autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in, out, s) +} + +func autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_api_NodeAffinity_To_v1_NodeAffinity is an autogenerated conversion function. +func Convert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { + return autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in, out, s) +} + +func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { + out.Type = api.NodeConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_NodeCondition_To_api_NodeCondition is an autogenerated conversion function. +func Convert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { + return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s) +} + +func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { + out.Type = NodeConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_api_NodeCondition_To_v1_NodeCondition is an autogenerated conversion function. +func Convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { + return autoConvert_api_NodeCondition_To_v1_NodeCondition(in, out, s) +} + +func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints is an autogenerated conversion function. 
+func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints is an autogenerated conversion function. +func Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_NodeList_To_api_NodeList is an autogenerated conversion function. +func Convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { + return autoConvert_v1_NodeList_To_api_NodeList(in, out, s) +} + +func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Node, 0) + } else { + out.Items = *(*[]Node)(unsafe.Pointer(&in.Items)) + } + return nil +} + +// Convert_api_NodeList_To_v1_NodeList is an autogenerated conversion function. +func Convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { + return autoConvert_api_NodeList_To_v1_NodeList(in, out, s) +} + +func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions is an autogenerated conversion function. +func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s) +} + +func autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions is an autogenerated conversion function. +func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { + return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) +} + +func autoConvert_v1_NodeResources_To_api_NodeResources(in *NodeResources, out *api.NodeResources, s conversion.Scope) error { + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_v1_NodeResources_To_api_NodeResources is an autogenerated conversion function. 
+func Convert_v1_NodeResources_To_api_NodeResources(in *NodeResources, out *api.NodeResources, s conversion.Scope) error { + return autoConvert_v1_NodeResources_To_api_NodeResources(in, out, s) +} + +func autoConvert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *NodeResources, s conversion.Scope) error { + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_api_NodeResources_To_v1_NodeResources is an autogenerated conversion function. +func Convert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *NodeResources, s conversion.Scope) error { + return autoConvert_api_NodeResources_To_v1_NodeResources(in, out, s) +} + +func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]api.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +// Convert_v1_NodeSelector_To_api_NodeSelector is an autogenerated conversion function. +func Convert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { + return autoConvert_v1_NodeSelector_To_api_NodeSelector(in, out, s) +} + +func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { + if in.NodeSelectorTerms == nil { + out.NodeSelectorTerms = make([]NodeSelectorTerm, 0) + } else { + out.NodeSelectorTerms = *(*[]NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + } + return nil +} + +// Convert_api_NodeSelector_To_v1_NodeSelector is an autogenerated conversion function. +func Convert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { + return autoConvert_api_NodeSelector_To_v1_NodeSelector(in, out, s) +} + +func autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = api.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement is an autogenerated conversion function. +func Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement is an autogenerated conversion function. +func Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]api.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +// Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm is an autogenerated conversion function. 
+func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in, out, s) +} + +func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { + if in.MatchExpressions == nil { + out.MatchExpressions = make([]NodeSelectorRequirement, 0) + } else { + out.MatchExpressions = *(*[]NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + } + return nil +} + +// Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm is an autogenerated conversion function. +func Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) +} + +func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]api.Taint)(unsafe.Pointer(&in.Taints)) + return nil +} + +// Convert_v1_NodeSpec_To_api_NodeSpec is an autogenerated conversion function. +func Convert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { + return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s) +} + +func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]Taint)(unsafe.Pointer(&in.Taints)) + return nil +} + +// Convert_api_NodeSpec_To_v1_NodeSpec is an autogenerated conversion function. +func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { + return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s) +} + +func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*api.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = api.NodePhase(in.Phase) + out.Conditions = *(*[]api.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]api.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]api.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]api.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]api.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +// Convert_v1_NodeStatus_To_api_NodeStatus is an autogenerated conversion function. 
+func Convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { + return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s) +} + +func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = NodePhase(in.Phase) + out.Conditions = *(*[]NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +// Convert_api_NodeStatus_To_v1_NodeStatus is an autogenerated conversion function. +func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { + return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s) +} + +func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +// Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo is an autogenerated conversion function. +func Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in, out, s) +} + +func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +// Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo is an autogenerated conversion function. +func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { + return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) +} + +func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector is an autogenerated conversion function. 
+func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s) +} + +func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector is an autogenerated conversion function. +func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) +} + +func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +// Convert_v1_ObjectMeta_To_api_ObjectMeta is an autogenerated conversion function. +func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { + return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) +} + +func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +// Convert_api_ObjectMeta_To_v1_ObjectMeta is an autogenerated conversion function. 
+func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { + return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) +} + +func autoConvert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_v1_ObjectReference_To_api_ObjectReference is an autogenerated conversion function. +func Convert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { + return autoConvert_v1_ObjectReference_To_api_ObjectReference(in, out, s) +} + +func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_api_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. +func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { + return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) +} + +func autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PersistentVolume_To_api_PersistentVolume is an autogenerated conversion function. +func Convert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { + return autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in, out, s) +} + +func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_api_PersistentVolume_To_v1_PersistentVolume is an autogenerated conversion function. 
+func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { + return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim is an autogenerated conversion function. +func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]PersistentVolumeClaim, 0) + } else { + out.Items = *(*[]PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + } + return nil +} + +// Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList is an autogenerated conversion function. 
+func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + return nil +} + +// Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + return nil +} + +// Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec is an autogenerated conversion function. +func Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = api.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus is an autogenerated conversion function. +func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. +func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_v1_PersistentVolume_To_api_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s) +} + +func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]PersistentVolume, 0) + } + return nil +} + +// Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList is an autogenerated conversion function. +func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.Local = (*api.LocalVolumeSource)(unsafe.Pointer(in.Local)) + out.StorageOS = (*api.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in, out, s) +} + +func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.ISCSI = (*ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.FlexVolume = (*FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.AzureFile = (*AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.Local = (*LocalVolumeSource)(unsafe.Pointer(in.Local)) + out.StorageOS = (*StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource is an autogenerated conversion function. +func Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*api.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + return nil +} + +// Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + return nil +} + +// Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is an autogenerated conversion function. +func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = api.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +// Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus is an autogenerated conversion function. +func Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +// Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus is an autogenerated conversion function. +func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +// Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. 
+func Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +// Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_PodAffinity_To_api_PodAffinity is an autogenerated conversion function. +func Convert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s) +} + +func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_api_PodAffinity_To_v1_PodAffinity is an autogenerated conversion function. 
+func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { + return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s) +} + +func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm is an autogenerated conversion function. +func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s) +} + +func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm is an autogenerated conversion function. +func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { + return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) +} + +func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity is an autogenerated conversion function. +func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s) +} + +func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity is an autogenerated conversion function. +func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { + return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) +} + +func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +// Convert_v1_PodAttachOptions_To_api_PodAttachOptions is an autogenerated conversion function. 
+func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { + return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s) +} + +func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +// Convert_api_PodAttachOptions_To_v1_PodAttachOptions is an autogenerated conversion function. +func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { + return autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) +} + +func autoConvert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { + out.Type = api.PodConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PodCondition_To_api_PodCondition is an autogenerated conversion function. +func Convert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { + return autoConvert_v1_PodCondition_To_api_PodCondition(in, out, s) +} + +func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { + out.Type = PodConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_api_PodCondition_To_v1_PodCondition is an autogenerated conversion function. +func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { + return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s) +} + +func autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_v1_PodExecOptions_To_api_PodExecOptions is an autogenerated conversion function. +func Convert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { + return autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in, out, s) +} + +func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + if in.Command == nil { + out.Command = make([]string, 0) + } else { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + } + return nil +} + +// Convert_api_PodExecOptions_To_v1_PodExecOptions is an autogenerated conversion function. 
+func Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { + return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s) +} + +func autoConvert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Pod, len(*in)) + for i := range *in { + if err := Convert_v1_Pod_To_api_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PodList_To_api_PodList is an autogenerated conversion function. +func Convert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { + return autoConvert_v1_PodList_To_api_PodList(in, out, s) +} + +func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := Convert_api_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]Pod, 0) + } + return nil +} + +// Convert_api_PodList_To_v1_PodList is an autogenerated conversion function. +func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { + return autoConvert_api_PodList_To_v1_PodList(in, out, s) +} + +func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +// Convert_v1_PodLogOptions_To_api_PodLogOptions is an autogenerated conversion function. +func Convert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { + return autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in, out, s) +} + +func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +// Convert_api_PodLogOptions_To_v1_PodLogOptions is an autogenerated conversion function. +func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { + return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s) +} + +func autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions is an autogenerated conversion function. 
+func Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in, out, s) +} + +func autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions is an autogenerated conversion function. +func Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) +} + +func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_PodProxyOptions_To_api_PodProxyOptions is an autogenerated conversion function. +func Convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { + return autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in, out, s) +} + +func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_api_PodProxyOptions_To_v1_PodProxyOptions is an autogenerated conversion function. +func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { + return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) +} + +func autoConvert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { + out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*types.UnixUserID)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]types.UnixGroupID)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*types.UnixGroupID)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error { + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + out.SELinuxOptions = (*SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*types.UnixUserID)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]types.UnixGroupID)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*types.UnixGroupID)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_v1_PodSignature_To_api_PodSignature(in *PodSignature, out *api.PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +// Convert_v1_PodSignature_To_api_PodSignature is an autogenerated conversion function. 
+func Convert_v1_PodSignature_To_api_PodSignature(in *PodSignature, out *api.PodSignature, s conversion.Scope) error { + return autoConvert_v1_PodSignature_To_api_PodSignature(in, out, s) +} + +func autoConvert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +// Convert_api_PodSignature_To_v1_PodSignature is an autogenerated conversion function. +func Convert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *PodSignature, s conversion.Scope) error { + return autoConvert_api_PodSignature_To_v1_PodSignature(in, out, s) +} + +func autoConvert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api.Volume, len(*in)) + for i := range *in { + if err := Convert_v1_Volume_To_api_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.InitContainers = *(*[]api.Container)(unsafe.Pointer(&in.InitContainers)) + out.Containers = *(*[]api.Container)(unsafe.Pointer(&in.Containers)) + out.RestartPolicy = api.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = api.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + // INFO: in.DeprecatedServiceAccount opted out of conversion generation + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(api.PodSecurityContext) + if err := Convert_v1_PodSecurityContext_To_api_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*api.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]api.Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]api.HostAlias)(unsafe.Pointer(&in.HostAliases)) + return nil +} + +func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + if err := Convert_api_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.InitContainers = *(*[]Container)(unsafe.Pointer(&in.InitContainers)) + if in.Containers == nil { + out.Containers = make([]Container, 0) + } else { + out.Containers = *(*[]Container)(unsafe.Pointer(&in.Containers)) + } + out.RestartPolicy = RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = 
DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]HostAlias)(unsafe.Pointer(&in.HostAliases)) + return nil +} + +func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { + out.Phase = api.PodPhase(in.Phase) + out.Conditions = *(*[]api.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.InitContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.QOSClass = api.PodQOSClass(in.QOSClass) + return nil +} + +// Convert_v1_PodStatus_To_api_PodStatus is an autogenerated conversion function. +func Convert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { + return autoConvert_v1_PodStatus_To_api_PodStatus(in, out, s) +} + +func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { + out.Phase = PodPhase(in.Phase) + out.Conditions = *(*[]PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.QOSClass = PodQOSClass(in.QOSClass) + out.InitContainerStatuses = *(*[]ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + return nil +} + +// Convert_api_PodStatus_To_v1_PodStatus is an autogenerated conversion function. 
+func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { + return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s) +} + +func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PodTemplate_To_api_PodTemplate is an autogenerated conversion function. +func Convert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { + return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s) +} + +func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +// Convert_api_PodTemplate_To_v1_PodTemplate is an autogenerated conversion function. +func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { + return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s) +} + +func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_v1_PodTemplate_To_api_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PodTemplateList_To_api_PodTemplateList is an autogenerated conversion function. +func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { + return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s) +} + +func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + if err := Convert_api_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]PodTemplate, 0) + } + return nil +} + +// Convert_api_PodTemplateList_To_v1_PodTemplateList is an autogenerated conversion function. 
+func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { + return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s) +} + +func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource is an autogenerated conversion function. +func Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in, out, s) +} + +func autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource is an autogenerated conversion function. +func Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) +} + +func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +// Convert_v1_Preconditions_To_api_Preconditions is an autogenerated conversion function. +func Convert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { + return autoConvert_v1_Preconditions_To_api_Preconditions(in, out, s) +} + +func autoConvert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +// Convert_api_Preconditions_To_v1_Preconditions is an autogenerated conversion function. +func Convert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { + return autoConvert_api_Preconditions_To_v1_Preconditions(in, out, s) +} + +func autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_v1_PodSignature_To_api_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry is an autogenerated conversion function. 
+func Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_api_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry is an autogenerated conversion function. +func Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm is an autogenerated conversion function. +func Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +// Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm is an autogenerated conversion function. +func Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { + if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +// Convert_v1_Probe_To_api_Probe is an autogenerated conversion function. 
+func Convert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { + return autoConvert_v1_Probe_To_api_Probe(in, out, s) +} + +func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { + if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +// Convert_api_Probe_To_v1_Probe is an autogenerated conversion function. +func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { + return autoConvert_api_Probe_To_v1_Probe(in, out, s) +} + +func autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]api.VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource is an autogenerated conversion function. +func Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error { + if in.Sources == nil { + out.Sources = make([]VolumeProjection, 0) + } else { + out.Sources = *(*[]VolumeProjection)(unsafe.Pointer(&in.Sources)) + } + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource is an autogenerated conversion function. +func Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +// Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource is an autogenerated conversion function. +func Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +// Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource is an autogenerated conversion function. 
+func Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource is an autogenerated conversion function. +func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s) +} + +func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { + if in.CephMonitors == nil { + out.CephMonitors = make([]string, 0) + } else { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + } + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource is an autogenerated conversion function. +func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { + return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) +} + +func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1_RangeAllocation_To_api_RangeAllocation is an autogenerated conversion function. +func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { + return autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in, out, s) +} + +func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + if in.Data == nil { + out.Data = make([]byte, 0) + } else { + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + } + return nil +} + +// Convert_api_RangeAllocation_To_v1_RangeAllocation is an autogenerated conversion function. 
+func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { + return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s) +} + +func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ReplicationController_To_api_ReplicationController is an autogenerated conversion function. +func Convert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { + return autoConvert_v1_ReplicationController_To_api_ReplicationController(in, out, s) +} + +func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_api_ReplicationController_To_v1_ReplicationController is an autogenerated conversion function. +func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { + return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s) +} + +func autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = api.ReplicationControllerConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition is an autogenerated conversion function. +func Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *ReplicationControllerCondition, s conversion.Scope) error { + out.Type = ReplicationControllerConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition is an autogenerated conversion function. 
+func Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_v1_ReplicationController_To_api_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList is an autogenerated conversion function. +func Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in, out, s) +} + +func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := Convert_api_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]ReplicationController, 0) + } + return nil +} + +// Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList is an autogenerated conversion function. 
+func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) +} + +func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(api.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]api.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus is an autogenerated conversion function. +func Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus is an autogenerated conversion function. 
+func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +// Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector is an autogenerated conversion function. +func Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in, out, s) +} + +func autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +// Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector is an autogenerated conversion function. +func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) +} + +func autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ResourceQuota_To_api_ResourceQuota is an autogenerated conversion function. +func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { + return autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in, out, s) +} + +func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_api_ResourceQuota_To_v1_ResourceQuota is an autogenerated conversion function. +func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { + return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s) +} + +func autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList is an autogenerated conversion function. 
+func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in, out, s) +} + +func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ResourceQuota, 0) + } else { + out.Items = *(*[]ResourceQuota)(unsafe.Pointer(&in.Items)) + } + return nil +} + +// Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList is an autogenerated conversion function. +func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) +} + +func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]api.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +// Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec is an autogenerated conversion function. +func Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +// Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec is an autogenerated conversion function. +func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*api.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +// Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus is an autogenerated conversion function. +func Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +// Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus is an autogenerated conversion function. 
+func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*api.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*api.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_v1_ResourceRequirements_To_api_ResourceRequirements is an autogenerated conversion function. +func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { + return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s) +} + +func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_api_ResourceRequirements_To_v1_ResourceRequirements is an autogenerated conversion function. +func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { + return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) +} + +func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +// Convert_v1_SELinuxOptions_To_api_SELinuxOptions is an autogenerated conversion function. +func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { + return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s) +} + +func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +// Convert_api_SELinuxOptions_To_v1_SELinuxOptions is an autogenerated conversion function. +func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { + return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) +} + +func autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource is an autogenerated conversion function. +func Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + // INFO: in.StringData opted out of conversion generation + out.Type = api.SecretType(in.Type) + return nil +} + +func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + out.Type = SecretType(in.Type) + return nil +} + +// Convert_api_Secret_To_v1_Secret is an autogenerated conversion function. +func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { + return autoConvert_api_Secret_To_v1_Secret(in, out, s) +} + +func autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in *SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretEnvSource_To_api_SecretEnvSource is an autogenerated conversion function. +func Convert_v1_SecretEnvSource_To_api_SecretEnvSource(in *SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { + return autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in, out, s) +} + +func autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *SecretEnvSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_api_SecretEnvSource_To_v1_SecretEnvSource is an autogenerated conversion function. 
+func Convert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *SecretEnvSource, s conversion.Scope) error { + return autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) +} + +func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretKeySelector_To_api_SecretKeySelector is an autogenerated conversion function. +func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { + return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s) +} + +func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_api_SecretKeySelector_To_v1_SecretKeySelector is an autogenerated conversion function. +func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { + return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) +} + +func autoConvert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Secret, len(*in)) + for i := range *in { + if err := Convert_v1_Secret_To_api_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_SecretList_To_api_SecretList is an autogenerated conversion function. +func Convert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { + return autoConvert_v1_SecretList_To_api_SecretList(in, out, s) +} + +func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := Convert_api_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]Secret, 0) + } + return nil +} + +// Convert_api_SecretList_To_v1_SecretList is an autogenerated conversion function. +func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { + return autoConvert_api_SecretList_To_v1_SecretList(in, out, s) +} + +func autoConvert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretProjection_To_api_SecretProjection is an autogenerated conversion function. 
+func Convert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error { + return autoConvert_v1_SecretProjection_To_api_SecretProjection(in, out, s) +} + +func autoConvert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_api_SecretProjection_To_v1_SecretProjection is an autogenerated conversion function. +func Convert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error { + return autoConvert_api_SecretProjection_To_v1_SecretProjection(in, out, s) +} + +func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource is an autogenerated conversion function. +func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s) +} + +func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource is an autogenerated conversion function. +func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { + return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) +} + +func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*api.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*types.UnixUserID)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + return nil +} + +// Convert_v1_SecurityContext_To_api_SecurityContext is an autogenerated conversion function. 
+func Convert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { + return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s) +} + +func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { + out.Capabilities = (*Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*types.UnixUserID)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + return nil +} + +// Convert_api_SecurityContext_To_v1_SecurityContext is an autogenerated conversion function. +func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { + return autoConvert_api_SecurityContext_To_v1_SecurityContext(in, out, s) +} + +func autoConvert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +// Convert_v1_SerializedReference_To_api_SerializedReference is an autogenerated conversion function. +func Convert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { + return autoConvert_v1_SerializedReference_To_api_SerializedReference(in, out, s) +} + +func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +// Convert_api_SerializedReference_To_v1_SerializedReference is an autogenerated conversion function. +func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { + return autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s) +} + +func autoConvert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ServiceStatus_To_api_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Service_To_api_Service is an autogenerated conversion function. +func Convert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { + return autoConvert_v1_Service_To_api_Service(in, out, s) +} + +func autoConvert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_api_Service_To_v1_Service is an autogenerated conversion function. 
+func Convert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { + return autoConvert_api_Service_To_v1_Service(in, out, s) +} + +func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +// Convert_v1_ServiceAccount_To_api_ServiceAccount is an autogenerated conversion function. +func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { + return autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in, out, s) +} + +func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +// Convert_api_ServiceAccount_To_v1_ServiceAccount is an autogenerated conversion function. +func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { + return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s) +} + +func autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ServiceAccountList_To_api_ServiceAccountList is an autogenerated conversion function. +func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { + return autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in, out, s) +} + +func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ServiceAccount, 0) + } else { + out.Items = *(*[]ServiceAccount)(unsafe.Pointer(&in.Items)) + } + return nil +} + +// Convert_api_ServiceAccountList_To_v1_ServiceAccountList is an autogenerated conversion function. +func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { + return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) +} + +func autoConvert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Service, len(*in)) + for i := range *in { + if err := Convert_v1_Service_To_api_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ServiceList_To_api_ServiceList is an autogenerated conversion function. 
+func Convert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { + return autoConvert_v1_ServiceList_To_api_ServiceList(in, out, s) +} + +func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := Convert_api_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]Service, 0) + } + return nil +} + +// Convert_api_ServiceList_To_v1_ServiceList is an autogenerated conversion function. +func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { + return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s) +} + +func autoConvert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = api.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +// Convert_v1_ServicePort_To_api_ServicePort is an autogenerated conversion function. +func Convert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { + return autoConvert_v1_ServicePort_To_api_ServicePort(in, out, s) +} + +func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +// Convert_api_ServicePort_To_v1_ServicePort is an autogenerated conversion function. +func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { + return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s) +} + +func autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions is an autogenerated conversion function. +func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s) +} + +func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions is an autogenerated conversion function. 
+func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) +} + +func autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { + out.Ports = *(*[]api.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.Type = api.ServiceType(in.Type) + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) + out.LoadBalancerIP = in.LoadBalancerIP + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalName = in.ExternalName + out.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.HealthCheckNodePort = in.HealthCheckNodePort + return nil +} + +// Convert_v1_ServiceSpec_To_api_ServiceSpec is an autogenerated conversion function. +func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { + return autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in, out, s) +} + +func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { + out.Type = ServiceType(in.Type) + out.Ports = *(*[]ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.ExternalName = in.ExternalName + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.LoadBalancerIP = in.LoadBalancerIP + out.SessionAffinity = ServiceAffinity(in.SessionAffinity) + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.HealthCheckNodePort = in.HealthCheckNodePort + return nil +} + +// Convert_api_ServiceSpec_To_v1_ServiceSpec is an autogenerated conversion function. +func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { + return autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in, out, s) +} + +func autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { + if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ServiceStatus_To_api_ServiceStatus is an autogenerated conversion function. +func Convert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { + return autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in, out, s) +} + +func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { + if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +// Convert_api_ServiceStatus_To_v1_ServiceStatus is an autogenerated conversion function. 
+func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { + return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s) +} + +func autoConvert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in *StorageOSPersistentVolumeSource, out *api.StorageOSPersistentVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*api.ObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in *StorageOSPersistentVolumeSource, out *api.StorageOSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in, out, s) +} + +func autoConvert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *api.StorageOSPersistentVolumeSource, out *StorageOSPersistentVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*ObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource is an autogenerated conversion function. +func Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *api.StorageOSPersistentVolumeSource, out *StorageOSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in *StorageOSVolumeSource, out *api.StorageOSVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource is an autogenerated conversion function. +func Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in *StorageOSVolumeSource, out *api.StorageOSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in, out, s) +} + +func autoConvert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *api.StorageOSVolumeSource, out *StorageOSVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource is an autogenerated conversion function. 
+func Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *api.StorageOSVolumeSource, out *StorageOSVolumeSource, s conversion.Scope) error { + return autoConvert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in, out, s) +} + +func autoConvert_v1_Sysctl_To_api_Sysctl(in *Sysctl, out *api.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_Sysctl_To_api_Sysctl is an autogenerated conversion function. +func Convert_v1_Sysctl_To_api_Sysctl(in *Sysctl, out *api.Sysctl, s conversion.Scope) error { + return autoConvert_v1_Sysctl_To_api_Sysctl(in, out, s) +} + +func autoConvert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_api_Sysctl_To_v1_Sysctl is an autogenerated conversion function. +func Convert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *Sysctl, s conversion.Scope) error { + return autoConvert_api_Sysctl_To_v1_Sysctl(in, out, s) +} + +func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + out.Host = in.Host + return nil +} + +// Convert_v1_TCPSocketAction_To_api_TCPSocketAction is an autogenerated conversion function. +func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { + return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s) +} + +func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + out.Host = in.Host + return nil +} + +// Convert_api_TCPSocketAction_To_v1_TCPSocketAction is an autogenerated conversion function. +func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { + return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) +} + +func autoConvert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = api.TaintEffect(in.Effect) + out.TimeAdded = in.TimeAdded + return nil +} + +// Convert_v1_Taint_To_api_Taint is an autogenerated conversion function. +func Convert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { + return autoConvert_v1_Taint_To_api_Taint(in, out, s) +} + +func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = TaintEffect(in.Effect) + out.TimeAdded = in.TimeAdded + return nil +} + +// Convert_api_Taint_To_v1_Taint is an autogenerated conversion function. +func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { + return autoConvert_api_Taint_To_v1_Taint(in, out, s) +} + +func autoConvert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = api.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = api.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +// Convert_v1_Toleration_To_api_Toleration is an autogenerated conversion function. 
+func Convert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { + return autoConvert_v1_Toleration_To_api_Toleration(in, out, s) +} + +func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +// Convert_api_Toleration_To_v1_Toleration is an autogenerated conversion function. +func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { + return autoConvert_api_Toleration_To_v1_Toleration(in, out, s) +} + +func autoConvert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Volume_To_api_Volume is an autogenerated conversion function. +func Convert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { + return autoConvert_v1_Volume_To_api_Volume(in, out, s) +} + +func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +// Convert_api_Volume_To_v1_Volume is an autogenerated conversion function. +func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { + return autoConvert_api_Volume_To_v1_Volume(in, out, s) +} + +func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + return nil +} + +// Convert_v1_VolumeMount_To_api_VolumeMount is an autogenerated conversion function. +func Convert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { + return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s) +} + +func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + return nil +} + +// Convert_api_VolumeMount_To_v1_VolumeMount is an autogenerated conversion function. +func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { + return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s) +} + +func autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { + out.Secret = (*api.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*api.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*api.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_v1_VolumeProjection_To_api_VolumeProjection is an autogenerated conversion function. 
+func Convert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { + return autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in, out, s) +} + +func autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error { + out.Secret = (*SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_api_VolumeProjection_To_v1_VolumeProjection is an autogenerated conversion function. +func Convert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error { + return autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in, out, s) +} + +func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { + out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*api.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*api.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*api.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*api.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*api.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*api.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*api.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*api.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_v1_VolumeSource_To_api_VolumeSource is an autogenerated conversion function. 
+func Convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { + return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s) +} + +func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { + out.HostPath = (*HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.FlexVolume = (*FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_api_VolumeSource_To_v1_VolumeSource is an autogenerated conversion function. +func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { + return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s) +} + +func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + out.StoragePolicyName = in.StoragePolicyName + out.StoragePolicyID = in.StoragePolicyID + return nil +} + +// Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. 
+func Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + out.StoragePolicyName = in.StoragePolicyName + out.StoragePolicyID = in.StoragePolicyID + return nil +} + +// Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. +func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +// Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm is an autogenerated conversion function. +func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s) +} + +func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +// Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm is an autogenerated conversion function. +func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..38ee730eb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,3770 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AWSElasticBlockStoreVolumeSource, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Affinity, InType: reflect.TypeOf(&Affinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AttachedVolume, InType: reflect.TypeOf(&AttachedVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AvoidPods, InType: reflect.TypeOf(&AvoidPods{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AzureDiskVolumeSource, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AzureFileVolumeSource, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Binding, InType: reflect.TypeOf(&Binding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Capabilities, InType: reflect.TypeOf(&Capabilities{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CephFSVolumeSource, InType: reflect.TypeOf(&CephFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CinderVolumeSource, InType: reflect.TypeOf(&CinderVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentCondition, InType: reflect.TypeOf(&ComponentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Container, InType: reflect.TypeOf(&Container{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerPort, InType: reflect.TypeOf(&ContainerPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerState, InType: reflect.TypeOf(&ContainerState{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateRunning, InType: reflect.TypeOf(&ContainerStateRunning{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateTerminated, InType: reflect.TypeOf(&ContainerStateTerminated{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateWaiting, InType: reflect.TypeOf(&ContainerStateWaiting{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIProjection, InType: reflect.TypeOf(&DownwardAPIProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointAddress, InType: reflect.TypeOf(&EndpointAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointPort, InType: reflect.TypeOf(&EndpointPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Event, InType: reflect.TypeOf(&Event{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EventList, InType: reflect.TypeOf(&EventList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EventSource, InType: reflect.TypeOf(&EventSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GCEPersistentDiskVolumeSource, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GitRepoVolumeSource, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GlusterfsVolumeSource, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HTTPGetAction, InType: reflect.TypeOf(&HTTPGetAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HTTPHeader, InType: reflect.TypeOf(&HTTPHeader{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Handler, InType: reflect.TypeOf(&Handler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HostAlias, InType: reflect.TypeOf(&HostAlias{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HostPathVolumeSource, InType: reflect.TypeOf(&HostPathVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ISCSIVolumeSource, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_KeyToPath, InType: 
reflect.TypeOf(&KeyToPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Lifecycle, InType: reflect.TypeOf(&Lifecycle{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRange, InType: reflect.TypeOf(&LimitRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeItem, InType: reflect.TypeOf(&LimitRangeItem{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeList, InType: reflect.TypeOf(&LimitRangeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeSpec, InType: reflect.TypeOf(&LimitRangeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_List, InType: reflect.TypeOf(&List{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LoadBalancerIngress, InType: reflect.TypeOf(&LoadBalancerIngress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LoadBalancerStatus, InType: reflect.TypeOf(&LoadBalancerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LocalObjectReference, InType: reflect.TypeOf(&LocalObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LocalVolumeSource, InType: reflect.TypeOf(&LocalVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NFSVolumeSource, InType: reflect.TypeOf(&NFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Namespace, InType: reflect.TypeOf(&Namespace{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceList, InType: reflect.TypeOf(&NamespaceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceSpec, InType: reflect.TypeOf(&NamespaceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceStatus, InType: reflect.TypeOf(&NamespaceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Node, InType: reflect.TypeOf(&Node{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeAddress, InType: reflect.TypeOf(&NodeAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeAffinity, InType: reflect.TypeOf(&NodeAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeCondition, InType: reflect.TypeOf(&NodeCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeDaemonEndpoints, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeList, InType: reflect.TypeOf(&NodeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeProxyOptions, InType: reflect.TypeOf(&NodeProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeResources, InType: reflect.TypeOf(&NodeResources{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelector, InType: reflect.TypeOf(&NodeSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelectorRequirement, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelectorTerm, InType: reflect.TypeOf(&NodeSelectorTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSpec, InType: reflect.TypeOf(&NodeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeStatus, InType: reflect.TypeOf(&NodeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSystemInfo, InType: reflect.TypeOf(&NodeSystemInfo{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimList, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimSpec, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimStatus, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimVolumeSource, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeList, InType: reflect.TypeOf(&PersistentVolumeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Pod, InType: reflect.TypeOf(&Pod{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAntiAffinity, InType: reflect.TypeOf(&PodAntiAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAttachOptions, InType: reflect.TypeOf(&PodAttachOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodCondition, InType: reflect.TypeOf(&PodCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodList, InType: reflect.TypeOf(&PodList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSpec, InType: reflect.TypeOf(&PodSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodStatus, InType: reflect.TypeOf(&PodStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodStatusResult, InType: reflect.TypeOf(&PodStatusResult{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_v1_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferAvoidPodsEntry, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Probe, InType: reflect.TypeOf(&Probe{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationController, InType: reflect.TypeOf(&ReplicationController{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerCondition, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerList, InType: reflect.TypeOf(&ReplicationControllerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerSpec, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerStatus, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceFieldSelector, InType: reflect.TypeOf(&ResourceFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuota, InType: reflect.TypeOf(&ResourceQuota{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaList, InType: reflect.TypeOf(&ResourceQuotaList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaSpec, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Secret, InType: reflect.TypeOf(&Secret{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretList, InType: reflect.TypeOf(&SecretList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_v1_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Service, InType: reflect.TypeOf(&Service{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceAccount, InType: reflect.TypeOf(&ServiceAccount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceAccountList, InType: reflect.TypeOf(&ServiceAccountList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceList, InType: reflect.TypeOf(&ServiceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServicePort, InType: reflect.TypeOf(&ServicePort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceProxyOptions, InType: reflect.TypeOf(&ServiceProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceSpec, InType: reflect.TypeOf(&ServiceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceStatus, InType: reflect.TypeOf(&ServiceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_StorageOSPersistentVolumeSource, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_StorageOSVolumeSource, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Sysctl, InType: reflect.TypeOf(&Sysctl{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TCPSocketAction, InType: reflect.TypeOf(&TCPSocketAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Taint, InType: reflect.TypeOf(&Taint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Toleration, InType: reflect.TypeOf(&Toleration{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Volume, InType: reflect.TypeOf(&Volume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, + ) +} + +// DeepCopy_v1_AWSElasticBlockStoreVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_AWSElasticBlockStoreVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AWSElasticBlockStoreVolumeSource) + out := out.(*AWSElasticBlockStoreVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_Affinity is an autogenerated deepcopy function. 
+func DeepCopy_v1_Affinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Affinity) + out := out.(*Affinity) + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + if err := DeepCopy_v1_NodeAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := DeepCopy_v1_PodAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_v1_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_AttachedVolume is an autogenerated deepcopy function. +func DeepCopy_v1_AttachedVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AttachedVolume) + out := out.(*AttachedVolume) + *out = *in + return nil + } +} + +// DeepCopy_v1_AvoidPods is an autogenerated deepcopy function. +func DeepCopy_v1_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AvoidPods) + out := out.(*AvoidPods) + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PreferAvoidPodsEntry(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_AzureDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_AzureDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureDiskVolumeSource) + out := out.(*AzureDiskVolumeSource) + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(AzureDataDiskKind) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_AzureFileVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_AzureFileVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureFileVolumeSource) + out := out.(*AzureFileVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_Binding is an autogenerated deepcopy function. +func DeepCopy_v1_Binding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Binding) + out := out.(*Binding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + return nil + } +} + +// DeepCopy_v1_Capabilities is an autogenerated deepcopy function. +func DeepCopy_v1_Capabilities(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Capabilities) + out := out.(*Capabilities) + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_CephFSVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_v1_CephFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CephFSVolumeSource) + out := out.(*CephFSVolumeSource) + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_CinderVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_CinderVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CinderVolumeSource) + out := out.(*CinderVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_ComponentCondition is an autogenerated deepcopy function. +func DeepCopy_v1_ComponentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentCondition) + out := out.(*ComponentCondition) + *out = *in + return nil + } +} + +// DeepCopy_v1_ComponentStatus is an autogenerated deepcopy function. +func DeepCopy_v1_ComponentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatus) + out := out.(*ComponentStatus) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_ComponentStatusList is an autogenerated deepcopy function. +func DeepCopy_v1_ComponentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatusList) + out := out.(*ComponentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ComponentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_ConfigMap is an autogenerated deepcopy function. +func DeepCopy_v1_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMap) + out := out.(*ConfigMap) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +// DeepCopy_v1_ConfigMapEnvSource is an autogenerated deepcopy function. +func DeepCopy_v1_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapEnvSource) + out := out.(*ConfigMapEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_ConfigMapKeySelector is an autogenerated deepcopy function. +func DeepCopy_v1_ConfigMapKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapKeySelector) + out := out.(*ConfigMapKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_ConfigMapList is an autogenerated deepcopy function. 
+func DeepCopy_v1_ConfigMapList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapList) + out := out.(*ConfigMapList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ConfigMap(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_ConfigMapProjection is an autogenerated deepcopy function. +func DeepCopy_v1_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapProjection) + out := out.(*ConfigMapProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_ConfigMapVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_ConfigMapVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapVolumeSource) + out := out.(*ConfigMapVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_Container is an autogenerated deepcopy function. 
+func DeepCopy_v1_Container(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Container) + out := out.(*Container) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { + return err + } + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { + return err + } + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + if err := DeepCopy_v1_Lifecycle(*in, *out, c); err != nil { + return err + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + if err := DeepCopy_v1_SecurityContext(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_ContainerImage is an autogenerated deepcopy function. +func DeepCopy_v1_ContainerImage(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerImage) + out := out.(*ContainerImage) + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_ContainerPort is an autogenerated deepcopy function. +func DeepCopy_v1_ContainerPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerPort) + out := out.(*ContainerPort) + *out = *in + return nil + } +} + +// DeepCopy_v1_ContainerState is an autogenerated deepcopy function. +func DeepCopy_v1_ContainerState(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerState) + out := out.(*ContainerState) + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + if err := DeepCopy_v1_ContainerStateRunning(*in, *out, c); err != nil { + return err + } + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + if err := DeepCopy_v1_ContainerStateTerminated(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_ContainerStateRunning is an autogenerated deepcopy function. 
+func DeepCopy_v1_ContainerStateRunning(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateRunning) + out := out.(*ContainerStateRunning) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + return nil + } +} + +// DeepCopy_v1_ContainerStateTerminated is an autogenerated deepcopy function. +func DeepCopy_v1_ContainerStateTerminated(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateTerminated) + out := out.(*ContainerStateTerminated) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + out.FinishedAt = in.FinishedAt.DeepCopy() + return nil + } +} + +// DeepCopy_v1_ContainerStateWaiting is an autogenerated deepcopy function. +func DeepCopy_v1_ContainerStateWaiting(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateWaiting) + out := out.(*ContainerStateWaiting) + *out = *in + return nil + } +} + +// DeepCopy_v1_ContainerStatus is an autogenerated deepcopy function. +func DeepCopy_v1_ContainerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStatus) + out := out.(*ContainerStatus) + *out = *in + if err := DeepCopy_v1_ContainerState(&in.State, &out.State, c); err != nil { + return err + } + if err := DeepCopy_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_DaemonEndpoint is an autogenerated deepcopy function. +func DeepCopy_v1_DaemonEndpoint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonEndpoint) + out := out.(*DaemonEndpoint) + *out = *in + return nil + } +} + +// DeepCopy_v1_DeleteOptions is an autogenerated deepcopy function. +func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeleteOptions) + out := out.(*DeleteOptions) + *out = *in + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(Preconditions) + if err := DeepCopy_v1_Preconditions(*in, *out, c); err != nil { + return err + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_DownwardAPIProjection is an autogenerated deepcopy function. +func DeepCopy_v1_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIProjection) + out := out.(*DownwardAPIProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_DownwardAPIVolumeFile is an autogenerated deepcopy function. 
+func DeepCopy_v1_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeFile) + out := out.(*DownwardAPIVolumeFile) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_DownwardAPIVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_DownwardAPIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeSource) + out := out.(*DownwardAPIVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_EmptyDirVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_EmptyDirVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EmptyDirVolumeSource) + out := out.(*EmptyDirVolumeSource) + *out = *in + out.SizeLimit = in.SizeLimit.DeepCopy() + return nil + } +} + +// DeepCopy_v1_EndpointAddress is an autogenerated deepcopy function. +func DeepCopy_v1_EndpointAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointAddress) + out := out.(*EndpointAddress) + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_EndpointPort is an autogenerated deepcopy function. +func DeepCopy_v1_EndpointPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointPort) + out := out.(*EndpointPort) + *out = *in + return nil + } +} + +// DeepCopy_v1_EndpointSubset is an autogenerated deepcopy function. +func DeepCopy_v1_EndpointSubset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointSubset) + out := out.(*EndpointSubset) + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_Endpoints is an autogenerated deepcopy function. 
+func DeepCopy_v1_Endpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Endpoints) + out := out.(*Endpoints) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EndpointSubset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_EndpointsList is an autogenerated deepcopy function. +func DeepCopy_v1_EndpointsList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointsList) + out := out.(*EndpointsList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Endpoints(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_EnvFromSource is an autogenerated deepcopy function. +func DeepCopy_v1_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvFromSource) + out := out.(*EnvFromSource) + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + if err := DeepCopy_v1_ConfigMapEnvSource(*in, *out, c); err != nil { + return err + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + if err := DeepCopy_v1_SecretEnvSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_EnvVar is an autogenerated deepcopy function. +func DeepCopy_v1_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVar) + out := out.(*EnvVar) + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + if err := DeepCopy_v1_EnvVarSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_EnvVarSource is an autogenerated deepcopy function. +func DeepCopy_v1_EnvVarSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVarSource) + out := out.(*EnvVarSource) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + if err := DeepCopy_v1_ConfigMapKeySelector(*in, *out, c); err != nil { + return err + } + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + if err := DeepCopy_v1_SecretKeySelector(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_Event is an autogenerated deepcopy function. 
+func DeepCopy_v1_Event(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Event) + out := out.(*Event) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + out.FirstTimestamp = in.FirstTimestamp.DeepCopy() + out.LastTimestamp = in.LastTimestamp.DeepCopy() + return nil + } +} + +// DeepCopy_v1_EventList is an autogenerated deepcopy function. +func DeepCopy_v1_EventList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventList) + out := out.(*EventList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Event(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_EventSource is an autogenerated deepcopy function. +func DeepCopy_v1_EventSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventSource) + out := out.(*EventSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_ExecAction is an autogenerated deepcopy function. +func DeepCopy_v1_ExecAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExecAction) + out := out.(*ExecAction) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_FCVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_FCVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FCVolumeSource) + out := out.(*FCVolumeSource) + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_FlexVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_FlexVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlexVolumeSource) + out := out.(*FlexVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +// DeepCopy_v1_FlockerVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_FlockerVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlockerVolumeSource) + out := out.(*FlockerVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_GCEPersistentDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GCEPersistentDiskVolumeSource) + out := out.(*GCEPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_GitRepoVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_GitRepoVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GitRepoVolumeSource) + out := out.(*GitRepoVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_GlusterfsVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_v1_GlusterfsVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GlusterfsVolumeSource) + out := out.(*GlusterfsVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_HTTPGetAction is an autogenerated deepcopy function. +func DeepCopy_v1_HTTPGetAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPGetAction) + out := out.(*HTTPGetAction) + *out = *in + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_HTTPHeader is an autogenerated deepcopy function. +func DeepCopy_v1_HTTPHeader(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPHeader) + out := out.(*HTTPHeader) + *out = *in + return nil + } +} + +// DeepCopy_v1_Handler is an autogenerated deepcopy function. +func DeepCopy_v1_Handler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Handler) + out := out.(*Handler) + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + if err := DeepCopy_v1_ExecAction(*in, *out, c); err != nil { + return err + } + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + if err := DeepCopy_v1_HTTPGetAction(*in, *out, c); err != nil { + return err + } + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_HostAlias is an autogenerated deepcopy function. +func DeepCopy_v1_HostAlias(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostAlias) + out := out.(*HostAlias) + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_HostPathVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_HostPathVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPathVolumeSource) + out := out.(*HostPathVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_ISCSIVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_ISCSIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ISCSIVolumeSource) + out := out.(*ISCSIVolumeSource) + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_KeyToPath is an autogenerated deepcopy function. +func DeepCopy_v1_KeyToPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*KeyToPath) + out := out.(*KeyToPath) + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_Lifecycle is an autogenerated deepcopy function. 
+func DeepCopy_v1_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Lifecycle) + out := out.(*Lifecycle) + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { + return err + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_LimitRange is an autogenerated deepcopy function. +func DeepCopy_v1_LimitRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRange) + out := out.(*LimitRange) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_LimitRangeItem is an autogenerated deepcopy function. +func DeepCopy_v1_LimitRangeItem(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeItem) + out := out.(*LimitRangeItem) + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_v1_LimitRangeList is an autogenerated deepcopy function. +func DeepCopy_v1_LimitRangeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeList) + out := out.(*LimitRangeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + if err := DeepCopy_v1_LimitRange(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_LimitRangeSpec is an autogenerated deepcopy function. +func DeepCopy_v1_LimitRangeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeSpec) + out := out.(*LimitRangeSpec) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + if err := DeepCopy_v1_LimitRangeItem(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_List is an autogenerated deepcopy function. 
+func DeepCopy_v1_List(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*List) + out := out.(*List) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*runtime.RawExtension) + } + } + } + return nil + } +} + +// DeepCopy_v1_ListOptions is an autogenerated deepcopy function. +func DeepCopy_v1_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListOptions) + out := out.(*ListOptions) + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_LoadBalancerIngress is an autogenerated deepcopy function. +func DeepCopy_v1_LoadBalancerIngress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerIngress) + out := out.(*LoadBalancerIngress) + *out = *in + return nil + } +} + +// DeepCopy_v1_LoadBalancerStatus is an autogenerated deepcopy function. +func DeepCopy_v1_LoadBalancerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerStatus) + out := out.(*LoadBalancerStatus) + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_LocalObjectReference is an autogenerated deepcopy function. +func DeepCopy_v1_LocalObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalObjectReference) + out := out.(*LocalObjectReference) + *out = *in + return nil + } +} + +// DeepCopy_v1_LocalVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_LocalVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalVolumeSource) + out := out.(*LocalVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_NFSVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_NFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NFSVolumeSource) + out := out.(*NFSVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_Namespace is an autogenerated deepcopy function. +func DeepCopy_v1_Namespace(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Namespace) + out := out.(*Namespace) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_NamespaceList is an autogenerated deepcopy function. +func DeepCopy_v1_NamespaceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceList) + out := out.(*NamespaceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Namespace(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_NamespaceSpec is an autogenerated deepcopy function. 
+func DeepCopy_v1_NamespaceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceSpec) + out := out.(*NamespaceSpec) + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_NamespaceStatus is an autogenerated deepcopy function. +func DeepCopy_v1_NamespaceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceStatus) + out := out.(*NamespaceStatus) + *out = *in + return nil + } +} + +// DeepCopy_v1_Node is an autogenerated deepcopy function. +func DeepCopy_v1_Node(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Node) + out := out.(*Node) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_NodeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_NodeStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_NodeAddress is an autogenerated deepcopy function. +func DeepCopy_v1_NodeAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAddress) + out := out.(*NodeAddress) + *out = *in + return nil + } +} + +// DeepCopy_v1_NodeAffinity is an autogenerated deepcopy function. +func DeepCopy_v1_NodeAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAffinity) + out := out.(*NodeAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + if err := DeepCopy_v1_NodeSelector(*in, *out, c); err != nil { + return err + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_NodeCondition is an autogenerated deepcopy function. +func DeepCopy_v1_NodeCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeCondition) + out := out.(*NodeCondition) + *out = *in + out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_v1_NodeDaemonEndpoints is an autogenerated deepcopy function. +func DeepCopy_v1_NodeDaemonEndpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeDaemonEndpoints) + out := out.(*NodeDaemonEndpoints) + *out = *in + return nil + } +} + +// DeepCopy_v1_NodeList is an autogenerated deepcopy function. +func DeepCopy_v1_NodeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeList) + out := out.(*NodeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Node(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_NodeProxyOptions is an autogenerated deepcopy function. 
+func DeepCopy_v1_NodeProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeProxyOptions) + out := out.(*NodeProxyOptions) + *out = *in + return nil + } +} + +// DeepCopy_v1_NodeResources is an autogenerated deepcopy function. +func DeepCopy_v1_NodeResources(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeResources) + out := out.(*NodeResources) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_v1_NodeSelector is an autogenerated deepcopy function. +func DeepCopy_v1_NodeSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelector) + out := out.(*NodeSelector) + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_NodeSelectorTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_NodeSelectorRequirement is an autogenerated deepcopy function. +func DeepCopy_v1_NodeSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorRequirement) + out := out.(*NodeSelectorRequirement) + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_NodeSelectorTerm is an autogenerated deepcopy function. +func DeepCopy_v1_NodeSelectorTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorTerm) + out := out.(*NodeSelectorTerm) + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + if err := DeepCopy_v1_NodeSelectorRequirement(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_NodeSpec is an autogenerated deepcopy function. +func DeepCopy_v1_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSpec) + out := out.(*NodeSpec) + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Taint(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_NodeStatus is an autogenerated deepcopy function. 
+func DeepCopy_v1_NodeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeStatus) + out := out.(*NodeStatus) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_NodeCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ContainerImage(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_NodeSystemInfo is an autogenerated deepcopy function. +func DeepCopy_v1_NodeSystemInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSystemInfo) + out := out.(*NodeSystemInfo) + *out = *in + return nil + } +} + +// DeepCopy_v1_ObjectFieldSelector is an autogenerated deepcopy function. +func DeepCopy_v1_ObjectFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectFieldSelector) + out := out.(*ObjectFieldSelector) + *out = *in + return nil + } +} + +// DeepCopy_v1_ObjectMeta is an autogenerated deepcopy function. 
+func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMeta) + out := out.(*ObjectMeta) + *out = *in + out.CreationTimestamp = in.CreationTimestamp.DeepCopy() + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]meta_v1.OwnerReference, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*meta_v1.OwnerReference) + } + } + } + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.Initializers) + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_ObjectReference is an autogenerated deepcopy function. +func DeepCopy_v1_ObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectReference) + out := out.(*ObjectReference) + *out = *in + return nil + } +} + +// DeepCopy_v1_PersistentVolume is an autogenerated deepcopy function. +func DeepCopy_v1_PersistentVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolume) + out := out.(*PersistentVolume) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_PersistentVolumeClaim is an autogenerated deepcopy function. +func DeepCopy_v1_PersistentVolumeClaim(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaim) + out := out.(*PersistentVolumeClaim) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_PersistentVolumeClaimList is an autogenerated deepcopy function. 
+func DeepCopy_v1_PersistentVolumeClaimList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimList) + out := out.(*PersistentVolumeClaimList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_PersistentVolumeClaimSpec is an autogenerated deepcopy function. +func DeepCopy_v1_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimSpec) + out := out.(*PersistentVolumeClaimSpec) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.LabelSelector) + } + } + if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_PersistentVolumeClaimStatus is an autogenerated deepcopy function. +func DeepCopy_v1_PersistentVolumeClaimStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimStatus) + out := out.(*PersistentVolumeClaimStatus) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_v1_PersistentVolumeClaimVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_PersistentVolumeClaimVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimVolumeSource) + out := out.(*PersistentVolumeClaimVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_PersistentVolumeList is an autogenerated deepcopy function. +func DeepCopy_v1_PersistentVolumeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeList) + out := out.(*PersistentVolumeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PersistentVolume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_PersistentVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSource) + out := out.(*PersistentVolumeSource) + *out = *in + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_v1_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalVolumeSource) + **out = **in + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(StorageOSPersistentVolumeSource) + if err := DeepCopy_v1_StorageOSPersistentVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_PersistentVolumeSpec is an autogenerated deepcopy function. 
+func DeepCopy_v1_PersistentVolumeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSpec) + out := out.(*PersistentVolumeSpec) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if err := DeepCopy_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { + return err + } + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_PersistentVolumeStatus is an autogenerated deepcopy function. +func DeepCopy_v1_PersistentVolumeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeStatus) + out := out.(*PersistentVolumeStatus) + *out = *in + return nil + } +} + +// DeepCopy_v1_PhotonPersistentDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PhotonPersistentDiskVolumeSource) + out := out.(*PhotonPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_Pod is an autogenerated deepcopy function. +func DeepCopy_v1_Pod(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Pod) + out := out.(*Pod) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_PodAffinity is an autogenerated deepcopy function. +func DeepCopy_v1_PodAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinity) + out := out.(*PodAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_PodAffinityTerm is an autogenerated deepcopy function. 
+func DeepCopy_v1_PodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinityTerm) + out := out.(*PodAffinityTerm) + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.LabelSelector) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_PodAntiAffinity is an autogenerated deepcopy function. +func DeepCopy_v1_PodAntiAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAntiAffinity) + out := out.(*PodAntiAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_PodAttachOptions is an autogenerated deepcopy function. +func DeepCopy_v1_PodAttachOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAttachOptions) + out := out.(*PodAttachOptions) + *out = *in + return nil + } +} + +// DeepCopy_v1_PodCondition is an autogenerated deepcopy function. +func DeepCopy_v1_PodCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodCondition) + out := out.(*PodCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_v1_PodExecOptions is an autogenerated deepcopy function. +func DeepCopy_v1_PodExecOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodExecOptions) + out := out.(*PodExecOptions) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_PodList is an autogenerated deepcopy function. +func DeepCopy_v1_PodList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodList) + out := out.(*PodList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Pod(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_PodLogOptions is an autogenerated deepcopy function. 
+func DeepCopy_v1_PodLogOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodLogOptions) + out := out.(*PodLogOptions) + *out = *in + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_PodPortForwardOptions is an autogenerated deepcopy function. +func DeepCopy_v1_PodPortForwardOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPortForwardOptions) + out := out.(*PodPortForwardOptions) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_PodProxyOptions is an autogenerated deepcopy function. +func DeepCopy_v1_PodProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodProxyOptions) + out := out.(*PodProxyOptions) + *out = *in + return nil + } +} + +// DeepCopy_v1_PodSecurityContext is an autogenerated deepcopy function. +func DeepCopy_v1_PodSecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityContext) + out := out.(*PodSecurityContext) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(types.UnixUserID) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]types.UnixGroupID, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + *out = new(types.UnixGroupID) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_PodSignature is an autogenerated deepcopy function. +func DeepCopy_v1_PodSignature(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSignature) + out := out.(*PodSignature) + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.OwnerReference) + } + } + return nil + } +} + +// DeepCopy_v1_PodSpec is an autogenerated deepcopy function. 
+func DeepCopy_v1_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSpec) + out := out.(*PodSpec) + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := DeepCopy_v1_PodSecurityContext(*in, *out, c); err != nil { + return err + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(Affinity) + if err := DeepCopy_v1_Affinity(*in, *out, c); err != nil { + return err + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Toleration(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + if err := DeepCopy_v1_HostAlias(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_PodStatus is an autogenerated deepcopy function. 
+func DeepCopy_v1_PodStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatus) + out := out.(*PodStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_PodStatusResult is an autogenerated deepcopy function. +func DeepCopy_v1_PodStatusResult(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatusResult) + out := out.(*PodStatusResult) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_PodTemplate is an autogenerated deepcopy function. +func DeepCopy_v1_PodTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplate) + out := out.(*PodTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_PodTemplateList is an autogenerated deepcopy function. +func DeepCopy_v1_PodTemplateList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateList) + out := out.(*PodTemplateList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodTemplate(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_PodTemplateSpec is an autogenerated deepcopy function. +func DeepCopy_v1_PodTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateSpec) + out := out.(*PodTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_PortworxVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PortworxVolumeSource) + out := out.(*PortworxVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_Preconditions is an autogenerated deepcopy function. 
+func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Preconditions) + out := out.(*Preconditions) + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_PreferAvoidPodsEntry is an autogenerated deepcopy function. +func DeepCopy_v1_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferAvoidPodsEntry) + out := out.(*PreferAvoidPodsEntry) + *out = *in + if err := DeepCopy_v1_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { + return err + } + out.EvictionTime = in.EvictionTime.DeepCopy() + return nil + } +} + +// DeepCopy_v1_PreferredSchedulingTerm is an autogenerated deepcopy function. +func DeepCopy_v1_PreferredSchedulingTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferredSchedulingTerm) + out := out.(*PreferredSchedulingTerm) + *out = *in + if err := DeepCopy_v1_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_Probe is an autogenerated deepcopy function. +func DeepCopy_v1_Probe(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Probe) + out := out.(*Probe) + *out = *in + if err := DeepCopy_v1_Handler(&in.Handler, &out.Handler, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_ProjectedVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProjectedVolumeSource) + out := out.(*ProjectedVolumeSource) + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + if err := DeepCopy_v1_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_QuobyteVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_QuobyteVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*QuobyteVolumeSource) + out := out.(*QuobyteVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_RBDVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_RBDVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RBDVolumeSource) + out := out.(*RBDVolumeSource) + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_RangeAllocation is an autogenerated deepcopy function. +func DeepCopy_v1_RangeAllocation(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RangeAllocation) + out := out.(*RangeAllocation) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_ReplicationController is an autogenerated deepcopy function. 
+func DeepCopy_v1_ReplicationController(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationController) + out := out.(*ReplicationController) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ReplicationControllerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_ReplicationControllerCondition is an autogenerated deepcopy function. +func DeepCopy_v1_ReplicationControllerCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerCondition) + out := out.(*ReplicationControllerCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_v1_ReplicationControllerList is an autogenerated deepcopy function. +func DeepCopy_v1_ReplicationControllerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerList) + out := out.(*ReplicationControllerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ReplicationController(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_ReplicationControllerSpec is an autogenerated deepcopy function. +func DeepCopy_v1_ReplicationControllerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerSpec) + out := out.(*ReplicationControllerSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + if err := DeepCopy_v1_PodTemplateSpec(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_ReplicationControllerStatus is an autogenerated deepcopy function. +func DeepCopy_v1_ReplicationControllerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerStatus) + out := out.(*ReplicationControllerStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ReplicationControllerCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_ResourceFieldSelector is an autogenerated deepcopy function. +func DeepCopy_v1_ResourceFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceFieldSelector) + out := out.(*ResourceFieldSelector) + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return nil + } +} + +// DeepCopy_v1_ResourceQuota is an autogenerated deepcopy function. 
+func DeepCopy_v1_ResourceQuota(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuota) + out := out.(*ResourceQuota) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ResourceQuotaStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_ResourceQuotaList is an autogenerated deepcopy function. +func DeepCopy_v1_ResourceQuotaList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaList) + out := out.(*ResourceQuotaList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ResourceQuota(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_ResourceQuotaSpec is an autogenerated deepcopy function. +func DeepCopy_v1_ResourceQuotaSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaSpec) + out := out.(*ResourceQuotaSpec) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_ResourceQuotaStatus is an autogenerated deepcopy function. +func DeepCopy_v1_ResourceQuotaStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaStatus) + out := out.(*ResourceQuotaStatus) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_v1_ResourceRequirements is an autogenerated deepcopy function. +func DeepCopy_v1_ResourceRequirements(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceRequirements) + out := out.(*ResourceRequirements) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_v1_SELinuxOptions is an autogenerated deepcopy function. +func DeepCopy_v1_SELinuxOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxOptions) + out := out.(*SELinuxOptions) + *out = *in + return nil + } +} + +// DeepCopy_v1_ScaleIOVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleIOVolumeSource) + out := out.(*ScaleIOVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_Secret is an autogenerated deepcopy function. 
+func DeepCopy_v1_Secret(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Secret) + out := out.(*Secret) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*[]byte) + } + } + } + if in.StringData != nil { + in, out := &in.StringData, &out.StringData + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +// DeepCopy_v1_SecretEnvSource is an autogenerated deepcopy function. +func DeepCopy_v1_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretEnvSource) + out := out.(*SecretEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_SecretKeySelector is an autogenerated deepcopy function. +func DeepCopy_v1_SecretKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretKeySelector) + out := out.(*SecretKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_SecretList is an autogenerated deepcopy function. +func DeepCopy_v1_SecretList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretList) + out := out.(*SecretList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Secret(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_SecretProjection is an autogenerated deepcopy function. +func DeepCopy_v1_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretProjection) + out := out.(*SecretProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_SecretVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_SecretVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretVolumeSource) + out := out.(*SecretVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_SecurityContext is an autogenerated deepcopy function. 
+func DeepCopy_v1_SecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecurityContext) + out := out.(*SecurityContext) + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + if err := DeepCopy_v1_Capabilities(*in, *out, c); err != nil { + return err + } + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(types.UnixUserID) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_SerializedReference is an autogenerated deepcopy function. +func DeepCopy_v1_SerializedReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SerializedReference) + out := out.(*SerializedReference) + *out = *in + return nil + } +} + +// DeepCopy_v1_Service is an autogenerated deepcopy function. +func DeepCopy_v1_Service(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Service) + out := out.(*Service) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ServiceStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_ServiceAccount is an autogenerated deepcopy function. +func DeepCopy_v1_ServiceAccount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccount) + out := out.(*ServiceAccount) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_ServiceAccountList is an autogenerated deepcopy function. +func DeepCopy_v1_ServiceAccountList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccountList) + out := out.(*ServiceAccountList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ServiceAccount(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_ServiceList is an autogenerated deepcopy function. 
+func DeepCopy_v1_ServiceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceList) + out := out.(*ServiceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Service(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_v1_ServicePort is an autogenerated deepcopy function. +func DeepCopy_v1_ServicePort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServicePort) + out := out.(*ServicePort) + *out = *in + return nil + } +} + +// DeepCopy_v1_ServiceProxyOptions is an autogenerated deepcopy function. +func DeepCopy_v1_ServiceProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceProxyOptions) + out := out.(*ServiceProxyOptions) + *out = *in + return nil + } +} + +// DeepCopy_v1_ServiceSpec is an autogenerated deepcopy function. +func DeepCopy_v1_ServiceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceSpec) + out := out.(*ServiceSpec) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_v1_ServiceStatus is an autogenerated deepcopy function. +func DeepCopy_v1_ServiceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceStatus) + out := out.(*ServiceStatus) + *out = *in + if err := DeepCopy_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_StorageOSPersistentVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_StorageOSPersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageOSPersistentVolumeSource) + out := out.(*StorageOSPersistentVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_StorageOSVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_StorageOSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageOSVolumeSource) + out := out.(*StorageOSVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_Sysctl is an autogenerated deepcopy function. +func DeepCopy_v1_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Sysctl) + out := out.(*Sysctl) + *out = *in + return nil + } +} + +// DeepCopy_v1_TCPSocketAction is an autogenerated deepcopy function. 
+func DeepCopy_v1_TCPSocketAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TCPSocketAction) + out := out.(*TCPSocketAction) + *out = *in + return nil + } +} + +// DeepCopy_v1_Taint is an autogenerated deepcopy function. +func DeepCopy_v1_Taint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Taint) + out := out.(*Taint) + *out = *in + out.TimeAdded = in.TimeAdded.DeepCopy() + return nil + } +} + +// DeepCopy_v1_Toleration is an autogenerated deepcopy function. +func DeepCopy_v1_Toleration(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Toleration) + out := out.(*Toleration) + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_v1_Volume is an autogenerated deepcopy function. +func DeepCopy_v1_Volume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Volume) + out := out.(*Volume) + *out = *in + if err := DeepCopy_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_v1_VolumeMount is an autogenerated deepcopy function. +func DeepCopy_v1_VolumeMount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeMount) + out := out.(*VolumeMount) + *out = *in + return nil + } +} + +// DeepCopy_v1_VolumeProjection is an autogenerated deepcopy function. +func DeepCopy_v1_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeProjection) + out := out.(*VolumeProjection) + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + if err := DeepCopy_v1_SecretProjection(*in, *out, c); err != nil { + return err + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + if err := DeepCopy_v1_DownwardAPIProjection(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + if err := DeepCopy_v1_ConfigMapProjection(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_VolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeSource) + out := out.(*VolumeSource) + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + if err := DeepCopy_v1_EmptyDirVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + if err := DeepCopy_v1_SecretVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + if err := DeepCopy_v1_DownwardAPIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + if err := DeepCopy_v1_ConfigMapVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err 
:= DeepCopy_v1_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + if err := DeepCopy_v1_ProjectedVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(StorageOSVolumeSource) + if err := DeepCopy_v1_StorageOSVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_v1_VsphereVirtualDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_v1_VsphereVirtualDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VsphereVirtualDiskVolumeSource) + out := out.(*VsphereVirtualDiskVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_v1_WeightedPodAffinityTerm is an autogenerated deepcopy function. +func DeepCopy_v1_WeightedPodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WeightedPodAffinityTerm) + out := out.(*WeightedPodAffinityTerm) + *out = *in + if err := DeepCopy_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go new file mode 100644 index 000000000..31e795066 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go @@ -0,0 +1,631 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
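+//
+// For illustration only (not produced by defaulter-gen): a minimal sketch of
+// how a caller might use these defaulters, assuming it constructs its own
+// runtime.Scheme; the error handling shown is a placeholder assumption.
+//
+//	scheme := runtime.NewScheme()
+//	if err := RegisterDefaults(scheme); err != nil {
+//		panic(err) // assumed handling for this sketch only
+//	}
+//	pod := &Pod{}
+//	scheme.Default(pod) // invokes SetObjectDefaults_Pod registered by RegisterDefaults
+//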
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ConfigMap{}, func(obj interface{}) { SetObjectDefaults_ConfigMap(obj.(*ConfigMap)) }) + scheme.AddTypeDefaultingFunc(&ConfigMapList{}, func(obj interface{}) { SetObjectDefaults_ConfigMapList(obj.(*ConfigMapList)) }) + scheme.AddTypeDefaultingFunc(&Endpoints{}, func(obj interface{}) { SetObjectDefaults_Endpoints(obj.(*Endpoints)) }) + scheme.AddTypeDefaultingFunc(&EndpointsList{}, func(obj interface{}) { SetObjectDefaults_EndpointsList(obj.(*EndpointsList)) }) + scheme.AddTypeDefaultingFunc(&LimitRange{}, func(obj interface{}) { SetObjectDefaults_LimitRange(obj.(*LimitRange)) }) + scheme.AddTypeDefaultingFunc(&LimitRangeList{}, func(obj interface{}) { SetObjectDefaults_LimitRangeList(obj.(*LimitRangeList)) }) + scheme.AddTypeDefaultingFunc(&Namespace{}, func(obj interface{}) { SetObjectDefaults_Namespace(obj.(*Namespace)) }) + scheme.AddTypeDefaultingFunc(&NamespaceList{}, func(obj interface{}) { SetObjectDefaults_NamespaceList(obj.(*NamespaceList)) }) + scheme.AddTypeDefaultingFunc(&Node{}, func(obj interface{}) { SetObjectDefaults_Node(obj.(*Node)) }) + scheme.AddTypeDefaultingFunc(&NodeList{}, func(obj interface{}) { SetObjectDefaults_NodeList(obj.(*NodeList)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolume{}, func(obj interface{}) { SetObjectDefaults_PersistentVolume(obj.(*PersistentVolume)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolumeClaim{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaim(obj.(*PersistentVolumeClaim)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolumeClaimList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaimList(obj.(*PersistentVolumeClaimList)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*PersistentVolumeList)) }) + scheme.AddTypeDefaultingFunc(&Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*Pod)) }) + scheme.AddTypeDefaultingFunc(&PodAttachOptions{}, func(obj interface{}) { SetObjectDefaults_PodAttachOptions(obj.(*PodAttachOptions)) }) + scheme.AddTypeDefaultingFunc(&PodExecOptions{}, func(obj interface{}) { SetObjectDefaults_PodExecOptions(obj.(*PodExecOptions)) }) + scheme.AddTypeDefaultingFunc(&PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*PodList)) }) + scheme.AddTypeDefaultingFunc(&PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*PodTemplate)) }) + scheme.AddTypeDefaultingFunc(&PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*PodTemplateList)) }) + scheme.AddTypeDefaultingFunc(&ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*ReplicationController)) }) + scheme.AddTypeDefaultingFunc(&ReplicationControllerList{}, func(obj interface{}) { SetObjectDefaults_ReplicationControllerList(obj.(*ReplicationControllerList)) }) + scheme.AddTypeDefaultingFunc(&ResourceQuota{}, func(obj interface{}) { SetObjectDefaults_ResourceQuota(obj.(*ResourceQuota)) }) + scheme.AddTypeDefaultingFunc(&ResourceQuotaList{}, func(obj interface{}) { SetObjectDefaults_ResourceQuotaList(obj.(*ResourceQuotaList)) }) + scheme.AddTypeDefaultingFunc(&Secret{}, func(obj interface{}) { SetObjectDefaults_Secret(obj.(*Secret)) }) + scheme.AddTypeDefaultingFunc(&SecretList{}, func(obj interface{}) { SetObjectDefaults_SecretList(obj.(*SecretList)) }) + scheme.AddTypeDefaultingFunc(&Service{}, func(obj interface{}) { 
SetObjectDefaults_Service(obj.(*Service)) }) + scheme.AddTypeDefaultingFunc(&ServiceList{}, func(obj interface{}) { SetObjectDefaults_ServiceList(obj.(*ServiceList)) }) + return nil +} + +func SetObjectDefaults_ConfigMap(in *ConfigMap) { + SetDefaults_ConfigMap(in) +} + +func SetObjectDefaults_ConfigMapList(in *ConfigMapList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ConfigMap(a) + } +} + +func SetObjectDefaults_Endpoints(in *Endpoints) { + SetDefaults_Endpoints(in) +} + +func SetObjectDefaults_EndpointsList(in *EndpointsList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Endpoints(a) + } +} + +func SetObjectDefaults_LimitRange(in *LimitRange) { + for i := range in.Spec.Limits { + a := &in.Spec.Limits[i] + SetDefaults_LimitRangeItem(a) + SetDefaults_ResourceList(&a.Max) + SetDefaults_ResourceList(&a.Min) + SetDefaults_ResourceList(&a.Default) + SetDefaults_ResourceList(&a.DefaultRequest) + SetDefaults_ResourceList(&a.MaxLimitRequestRatio) + } +} + +func SetObjectDefaults_LimitRangeList(in *LimitRangeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_LimitRange(a) + } +} + +func SetObjectDefaults_Namespace(in *Namespace) { + SetDefaults_NamespaceStatus(&in.Status) +} + +func SetObjectDefaults_NamespaceList(in *NamespaceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Namespace(a) + } +} + +func SetObjectDefaults_Node(in *Node) { + SetDefaults_Node(in) + SetDefaults_NodeStatus(&in.Status) + SetDefaults_ResourceList(&in.Status.Capacity) + SetDefaults_ResourceList(&in.Status.Allocatable) +} + +func SetObjectDefaults_NodeList(in *NodeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Node(a) + } +} + +func SetObjectDefaults_PersistentVolume(in *PersistentVolume) { + SetDefaults_PersistentVolume(in) + SetDefaults_ResourceList(&in.Spec.Capacity) + if in.Spec.PersistentVolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(in.Spec.PersistentVolumeSource.RBD) + } + if in.Spec.PersistentVolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) + } + if in.Spec.PersistentVolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk) + } + if in.Spec.PersistentVolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO) + } +} + +func SetObjectDefaults_PersistentVolumeClaim(in *PersistentVolumeClaim) { + SetDefaults_PersistentVolumeClaim(in) + SetDefaults_ResourceList(&in.Spec.Resources.Limits) + SetDefaults_ResourceList(&in.Spec.Resources.Requests) + SetDefaults_ResourceList(&in.Status.Capacity) +} + +func SetObjectDefaults_PersistentVolumeClaimList(in *PersistentVolumeClaimList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PersistentVolumeClaim(a) + } +} + +func SetObjectDefaults_PersistentVolumeList(in *PersistentVolumeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PersistentVolume(a) + } +} + +func SetObjectDefaults_Pod(in *Pod) { + SetDefaults_Pod(in) + SetDefaults_PodSpec(&in.Spec) + for i := range in.Spec.Volumes { + a := &in.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + 
SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.InitContainers { + a := &in.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Containers { + a := &in.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_PodAttachOptions(in *PodAttachOptions) { + SetDefaults_PodAttachOptions(in) +} + +func SetObjectDefaults_PodExecOptions(in *PodExecOptions) { + SetDefaults_PodExecOptions(in) +} + +func SetObjectDefaults_PodList(in *PodList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Pod(a) + } 
+} + +func SetObjectDefaults_PodTemplate(in *PodTemplate) { + SetDefaults_PodSpec(&in.Template.Spec) + for i := range in.Template.Spec.Volumes { + a := &in.Template.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Template.Spec.InitContainers { + a := &in.Template.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Template.Spec.Containers { + a := &in.Template.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet 
!= nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_PodTemplateList(in *PodTemplateList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PodTemplate(a) + } +} + +func SetObjectDefaults_ReplicationController(in *ReplicationController) { + SetDefaults_ReplicationController(in) + if in.Spec.Template != nil { + SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + 
SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + } +} + +func SetObjectDefaults_ReplicationControllerList(in *ReplicationControllerList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ReplicationController(a) + } +} + +func SetObjectDefaults_ResourceQuota(in *ResourceQuota) { + SetDefaults_ResourceList(&in.Spec.Hard) + SetDefaults_ResourceList(&in.Status.Hard) + SetDefaults_ResourceList(&in.Status.Used) +} + +func SetObjectDefaults_ResourceQuotaList(in *ResourceQuotaList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ResourceQuota(a) + } +} + +func SetObjectDefaults_Secret(in *Secret) { + SetDefaults_Secret(in) +} + +func SetObjectDefaults_SecretList(in *SecretList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Secret(a) + } +} + +func SetObjectDefaults_Service(in *Service) { + SetDefaults_Service(in) +} + +func SetObjectDefaults_ServiceList(in *ServiceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Service(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go new file mode 100644 index 000000000..b9b24716e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go @@ -0,0 +1,3776 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package api + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + fields "k8s.io/apimachinery/pkg/fields" + labels "k8s.io/apimachinery/pkg/labels" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AWSElasticBlockStoreVolumeSource, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Affinity, InType: reflect.TypeOf(&Affinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AttachedVolume, InType: reflect.TypeOf(&AttachedVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AvoidPods, InType: reflect.TypeOf(&AvoidPods{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AzureDiskVolumeSource, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AzureFileVolumeSource, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Binding, InType: reflect.TypeOf(&Binding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Capabilities, InType: reflect.TypeOf(&Capabilities{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CephFSVolumeSource, InType: reflect.TypeOf(&CephFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CinderVolumeSource, InType: reflect.TypeOf(&CinderVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentCondition, InType: reflect.TypeOf(&ComponentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Container, InType: reflect.TypeOf(&Container{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerPort, InType: reflect.TypeOf(&ContainerPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerState, InType: reflect.TypeOf(&ContainerState{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateRunning, InType: reflect.TypeOf(&ContainerStateRunning{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateTerminated, InType: reflect.TypeOf(&ContainerStateTerminated{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateWaiting, InType: reflect.TypeOf(&ContainerStateWaiting{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIProjection, 
InType: reflect.TypeOf(&DownwardAPIProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointAddress, InType: reflect.TypeOf(&EndpointAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointPort, InType: reflect.TypeOf(&EndpointPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Event, InType: reflect.TypeOf(&Event{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventList, InType: reflect.TypeOf(&EventList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventSource, InType: reflect.TypeOf(&EventSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GCEPersistentDiskVolumeSource, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GitRepoVolumeSource, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GlusterfsVolumeSource, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HTTPGetAction, InType: reflect.TypeOf(&HTTPGetAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HTTPHeader, InType: reflect.TypeOf(&HTTPHeader{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Handler, InType: reflect.TypeOf(&Handler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HostAlias, InType: reflect.TypeOf(&HostAlias{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HostPathVolumeSource, InType: reflect.TypeOf(&HostPathVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ISCSIVolumeSource, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_KeyToPath, InType: reflect.TypeOf(&KeyToPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Lifecycle, InType: reflect.TypeOf(&Lifecycle{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRange, InType: reflect.TypeOf(&LimitRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeItem, InType: reflect.TypeOf(&LimitRangeItem{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_api_LimitRangeList, InType: reflect.TypeOf(&LimitRangeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeSpec, InType: reflect.TypeOf(&LimitRangeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_List, InType: reflect.TypeOf(&List{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LoadBalancerIngress, InType: reflect.TypeOf(&LoadBalancerIngress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LoadBalancerStatus, InType: reflect.TypeOf(&LoadBalancerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LocalObjectReference, InType: reflect.TypeOf(&LocalObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LocalVolumeSource, InType: reflect.TypeOf(&LocalVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NFSVolumeSource, InType: reflect.TypeOf(&NFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Namespace, InType: reflect.TypeOf(&Namespace{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceList, InType: reflect.TypeOf(&NamespaceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceSpec, InType: reflect.TypeOf(&NamespaceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceStatus, InType: reflect.TypeOf(&NamespaceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Node, InType: reflect.TypeOf(&Node{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeAddress, InType: reflect.TypeOf(&NodeAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeAffinity, InType: reflect.TypeOf(&NodeAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeCondition, InType: reflect.TypeOf(&NodeCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeDaemonEndpoints, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeList, InType: reflect.TypeOf(&NodeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeProxyOptions, InType: reflect.TypeOf(&NodeProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeResources, InType: reflect.TypeOf(&NodeResources{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelector, InType: reflect.TypeOf(&NodeSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelectorRequirement, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelectorTerm, InType: reflect.TypeOf(&NodeSelectorTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSpec, InType: reflect.TypeOf(&NodeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeStatus, InType: reflect.TypeOf(&NodeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSystemInfo, InType: reflect.TypeOf(&NodeSystemInfo{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimList, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimSpec, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimStatus, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimVolumeSource, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeList, InType: reflect.TypeOf(&PersistentVolumeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Pod, InType: reflect.TypeOf(&Pod{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAntiAffinity, InType: reflect.TypeOf(&PodAntiAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAttachOptions, InType: reflect.TypeOf(&PodAttachOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodCondition, InType: reflect.TypeOf(&PodCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodList, InType: reflect.TypeOf(&PodList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSpec, InType: reflect.TypeOf(&PodSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodStatus, InType: reflect.TypeOf(&PodStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodStatusResult, InType: reflect.TypeOf(&PodStatusResult{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferAvoidPodsEntry, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Probe, InType: reflect.TypeOf(&Probe{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationController, InType: reflect.TypeOf(&ReplicationController{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerCondition, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerList, InType: reflect.TypeOf(&ReplicationControllerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerSpec, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerStatus, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceFieldSelector, InType: reflect.TypeOf(&ResourceFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuota, InType: reflect.TypeOf(&ResourceQuota{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaList, InType: reflect.TypeOf(&ResourceQuotaList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaSpec, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Secret, InType: reflect.TypeOf(&Secret{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretList, InType: reflect.TypeOf(&SecretList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Service, InType: reflect.TypeOf(&Service{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceAccount, InType: 
reflect.TypeOf(&ServiceAccount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceAccountList, InType: reflect.TypeOf(&ServiceAccountList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceList, InType: reflect.TypeOf(&ServiceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServicePort, InType: reflect.TypeOf(&ServicePort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceProxyOptions, InType: reflect.TypeOf(&ServiceProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceSpec, InType: reflect.TypeOf(&ServiceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceStatus, InType: reflect.TypeOf(&ServiceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_StorageOSPersistentVolumeSource, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_StorageOSVolumeSource, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Sysctl, InType: reflect.TypeOf(&Sysctl{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_TCPSocketAction, InType: reflect.TypeOf(&TCPSocketAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Taint, InType: reflect.TypeOf(&Taint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Toleration, InType: reflect.TypeOf(&Toleration{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Volume, InType: reflect.TypeOf(&Volume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, + ) +} + +// DeepCopy_api_AWSElasticBlockStoreVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_AWSElasticBlockStoreVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AWSElasticBlockStoreVolumeSource) + out := out.(*AWSElasticBlockStoreVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_Affinity is an autogenerated deepcopy function. +func DeepCopy_api_Affinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Affinity) + out := out.(*Affinity) + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + if err := DeepCopy_api_NodeAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_AttachedVolume is an autogenerated deepcopy function. 
+func DeepCopy_api_AttachedVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AttachedVolume) + out := out.(*AttachedVolume) + *out = *in + return nil + } +} + +// DeepCopy_api_AvoidPods is an autogenerated deepcopy function. +func DeepCopy_api_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AvoidPods) + out := out.(*AvoidPods) + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + if err := DeepCopy_api_PreferAvoidPodsEntry(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_AzureDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_AzureDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureDiskVolumeSource) + out := out.(*AzureDiskVolumeSource) + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(AzureDataDiskKind) + **out = **in + } + return nil + } +} + +// DeepCopy_api_AzureFileVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_AzureFileVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureFileVolumeSource) + out := out.(*AzureFileVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_Binding is an autogenerated deepcopy function. +func DeepCopy_api_Binding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Binding) + out := out.(*Binding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + return nil + } +} + +// DeepCopy_api_Capabilities is an autogenerated deepcopy function. +func DeepCopy_api_Capabilities(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Capabilities) + out := out.(*Capabilities) + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_CephFSVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_CephFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CephFSVolumeSource) + out := out.(*CephFSVolumeSource) + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_CinderVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_CinderVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CinderVolumeSource) + out := out.(*CinderVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_ComponentCondition is an autogenerated deepcopy function. 
+func DeepCopy_api_ComponentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentCondition) + out := out.(*ComponentCondition) + *out = *in + return nil + } +} + +// DeepCopy_api_ComponentStatus is an autogenerated deepcopy function. +func DeepCopy_api_ComponentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatus) + out := out.(*ComponentStatus) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ComponentStatusList is an autogenerated deepcopy function. +func DeepCopy_api_ComponentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatusList) + out := out.(*ComponentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ComponentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ConfigMap is an autogenerated deepcopy function. +func DeepCopy_api_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMap) + out := out.(*ConfigMap) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +// DeepCopy_api_ConfigMapEnvSource is an autogenerated deepcopy function. +func DeepCopy_api_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapEnvSource) + out := out.(*ConfigMapEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_ConfigMapKeySelector is an autogenerated deepcopy function. +func DeepCopy_api_ConfigMapKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapKeySelector) + out := out.(*ConfigMapKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_ConfigMapList is an autogenerated deepcopy function. +func DeepCopy_api_ConfigMapList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapList) + out := out.(*ConfigMapList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + if err := DeepCopy_api_ConfigMap(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ConfigMapProjection is an autogenerated deepcopy function. 
+func DeepCopy_api_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapProjection) + out := out.(*ConfigMapProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_ConfigMapVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_ConfigMapVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapVolumeSource) + out := out.(*ConfigMapVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_Container is an autogenerated deepcopy function. +func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Container) + out := out.(*Container) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + if err := DeepCopy_api_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + if err := DeepCopy_api_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + if err := DeepCopy_api_Probe(*in, *out, c); err != nil { + return err + } + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + if err := DeepCopy_api_Probe(*in, *out, c); err != nil { + return err + } + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + if err := DeepCopy_api_Lifecycle(*in, *out, c); err != nil { + return err + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + if err := DeepCopy_api_SecurityContext(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_ContainerImage is an autogenerated deepcopy function. 
+func DeepCopy_api_ContainerImage(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerImage) + out := out.(*ContainerImage) + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ContainerPort is an autogenerated deepcopy function. +func DeepCopy_api_ContainerPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerPort) + out := out.(*ContainerPort) + *out = *in + return nil + } +} + +// DeepCopy_api_ContainerState is an autogenerated deepcopy function. +func DeepCopy_api_ContainerState(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerState) + out := out.(*ContainerState) + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + if err := DeepCopy_api_ContainerStateRunning(*in, *out, c); err != nil { + return err + } + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + if err := DeepCopy_api_ContainerStateTerminated(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_ContainerStateRunning is an autogenerated deepcopy function. +func DeepCopy_api_ContainerStateRunning(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateRunning) + out := out.(*ContainerStateRunning) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + return nil + } +} + +// DeepCopy_api_ContainerStateTerminated is an autogenerated deepcopy function. +func DeepCopy_api_ContainerStateTerminated(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateTerminated) + out := out.(*ContainerStateTerminated) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + out.FinishedAt = in.FinishedAt.DeepCopy() + return nil + } +} + +// DeepCopy_api_ContainerStateWaiting is an autogenerated deepcopy function. +func DeepCopy_api_ContainerStateWaiting(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateWaiting) + out := out.(*ContainerStateWaiting) + *out = *in + return nil + } +} + +// DeepCopy_api_ContainerStatus is an autogenerated deepcopy function. +func DeepCopy_api_ContainerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStatus) + out := out.(*ContainerStatus) + *out = *in + if err := DeepCopy_api_ContainerState(&in.State, &out.State, c); err != nil { + return err + } + if err := DeepCopy_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_DaemonEndpoint is an autogenerated deepcopy function. +func DeepCopy_api_DaemonEndpoint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonEndpoint) + out := out.(*DaemonEndpoint) + *out = *in + return nil + } +} + +// DeepCopy_api_DeleteOptions is an autogenerated deepcopy function. 
+func DeepCopy_api_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeleteOptions) + out := out.(*DeleteOptions) + *out = *in + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(Preconditions) + if err := DeepCopy_api_Preconditions(*in, *out, c); err != nil { + return err + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +// DeepCopy_api_DownwardAPIProjection is an autogenerated deepcopy function. +func DeepCopy_api_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIProjection) + out := out.(*DownwardAPIProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_DownwardAPIVolumeFile is an autogenerated deepcopy function. +func DeepCopy_api_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeFile) + out := out.(*DownwardAPIVolumeFile) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_api_DownwardAPIVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_DownwardAPIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeSource) + out := out.(*DownwardAPIVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_api_EmptyDirVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_EmptyDirVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EmptyDirVolumeSource) + out := out.(*EmptyDirVolumeSource) + *out = *in + out.SizeLimit = in.SizeLimit.DeepCopy() + return nil + } +} + +// DeepCopy_api_EndpointAddress is an autogenerated deepcopy function. 
+func DeepCopy_api_EndpointAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointAddress) + out := out.(*EndpointAddress) + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_EndpointPort is an autogenerated deepcopy function. +func DeepCopy_api_EndpointPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointPort) + out := out.(*EndpointPort) + *out = *in + return nil + } +} + +// DeepCopy_api_EndpointSubset is an autogenerated deepcopy function. +func DeepCopy_api_EndpointSubset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointSubset) + out := out.(*EndpointSubset) + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_Endpoints is an autogenerated deepcopy function. +func DeepCopy_api_Endpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Endpoints) + out := out.(*Endpoints) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointSubset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_EndpointsList is an autogenerated deepcopy function. +func DeepCopy_api_EndpointsList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointsList) + out := out.(*EndpointsList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + if err := DeepCopy_api_Endpoints(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_EnvFromSource is an autogenerated deepcopy function. +func DeepCopy_api_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvFromSource) + out := out.(*EnvFromSource) + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + if err := DeepCopy_api_ConfigMapEnvSource(*in, *out, c); err != nil { + return err + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + if err := DeepCopy_api_SecretEnvSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_EnvVar is an autogenerated deepcopy function. 
+func DeepCopy_api_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVar) + out := out.(*EnvVar) + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + if err := DeepCopy_api_EnvVarSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_EnvVarSource is an autogenerated deepcopy function. +func DeepCopy_api_EnvVarSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVarSource) + out := out.(*EnvVarSource) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + if err := DeepCopy_api_ConfigMapKeySelector(*in, *out, c); err != nil { + return err + } + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + if err := DeepCopy_api_SecretKeySelector(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_Event is an autogenerated deepcopy function. +func DeepCopy_api_Event(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Event) + out := out.(*Event) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + out.FirstTimestamp = in.FirstTimestamp.DeepCopy() + out.LastTimestamp = in.LastTimestamp.DeepCopy() + return nil + } +} + +// DeepCopy_api_EventList is an autogenerated deepcopy function. +func DeepCopy_api_EventList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventList) + out := out.(*EventList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + if err := DeepCopy_api_Event(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_EventSource is an autogenerated deepcopy function. +func DeepCopy_api_EventSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventSource) + out := out.(*EventSource) + *out = *in + return nil + } +} + +// DeepCopy_api_ExecAction is an autogenerated deepcopy function. +func DeepCopy_api_ExecAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExecAction) + out := out.(*ExecAction) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_FCVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_FCVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FCVolumeSource) + out := out.(*FCVolumeSource) + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_api_FlexVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_api_FlexVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlexVolumeSource) + out := out.(*FlexVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +// DeepCopy_api_FlockerVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_FlockerVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlockerVolumeSource) + out := out.(*FlockerVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_GCEPersistentDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GCEPersistentDiskVolumeSource) + out := out.(*GCEPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_GitRepoVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_GitRepoVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GitRepoVolumeSource) + out := out.(*GitRepoVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_GlusterfsVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_GlusterfsVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GlusterfsVolumeSource) + out := out.(*GlusterfsVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_HTTPGetAction is an autogenerated deepcopy function. +func DeepCopy_api_HTTPGetAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPGetAction) + out := out.(*HTTPGetAction) + *out = *in + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_HTTPHeader is an autogenerated deepcopy function. +func DeepCopy_api_HTTPHeader(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPHeader) + out := out.(*HTTPHeader) + *out = *in + return nil + } +} + +// DeepCopy_api_Handler is an autogenerated deepcopy function. +func DeepCopy_api_Handler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Handler) + out := out.(*Handler) + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + if err := DeepCopy_api_ExecAction(*in, *out, c); err != nil { + return err + } + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + if err := DeepCopy_api_HTTPGetAction(*in, *out, c); err != nil { + return err + } + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return nil + } +} + +// DeepCopy_api_HostAlias is an autogenerated deepcopy function. +func DeepCopy_api_HostAlias(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostAlias) + out := out.(*HostAlias) + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_HostPathVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_api_HostPathVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPathVolumeSource) + out := out.(*HostPathVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_ISCSIVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_ISCSIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ISCSIVolumeSource) + out := out.(*ISCSIVolumeSource) + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_KeyToPath is an autogenerated deepcopy function. +func DeepCopy_api_KeyToPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*KeyToPath) + out := out.(*KeyToPath) + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_api_Lifecycle is an autogenerated deepcopy function. +func DeepCopy_api_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Lifecycle) + out := out.(*Lifecycle) + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + if err := DeepCopy_api_Handler(*in, *out, c); err != nil { + return err + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + if err := DeepCopy_api_Handler(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_LimitRange is an autogenerated deepcopy function. +func DeepCopy_api_LimitRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRange) + out := out.(*LimitRange) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_LimitRangeItem is an autogenerated deepcopy function. +func DeepCopy_api_LimitRangeItem(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeItem) + out := out.(*LimitRangeItem) + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_api_LimitRangeList is an autogenerated deepcopy function. 
+func DeepCopy_api_LimitRangeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeList) + out := out.(*LimitRangeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + if err := DeepCopy_api_LimitRange(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_LimitRangeSpec is an autogenerated deepcopy function. +func DeepCopy_api_LimitRangeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeSpec) + out := out.(*LimitRangeSpec) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + if err := DeepCopy_api_LimitRangeItem(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_List is an autogenerated deepcopy function. +func DeepCopy_api_List(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*List) + out := out.(*List) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*runtime.Object) + } + } + } + return nil + } +} + +// DeepCopy_api_ListOptions is an autogenerated deepcopy function. +func DeepCopy_api_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListOptions) + out := out.(*ListOptions) + *out = *in + // in.LabelSelector is kind 'Interface' + if in.LabelSelector != nil { + if newVal, err := c.DeepCopy(&in.LabelSelector); err != nil { + return err + } else { + out.LabelSelector = *newVal.(*labels.Selector) + } + } + // in.FieldSelector is kind 'Interface' + if in.FieldSelector != nil { + if newVal, err := c.DeepCopy(&in.FieldSelector); err != nil { + return err + } else { + out.FieldSelector = *newVal.(*fields.Selector) + } + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_api_LoadBalancerIngress is an autogenerated deepcopy function. +func DeepCopy_api_LoadBalancerIngress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerIngress) + out := out.(*LoadBalancerIngress) + *out = *in + return nil + } +} + +// DeepCopy_api_LoadBalancerStatus is an autogenerated deepcopy function. +func DeepCopy_api_LoadBalancerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerStatus) + out := out.(*LoadBalancerStatus) + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_LocalObjectReference is an autogenerated deepcopy function. +func DeepCopy_api_LocalObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalObjectReference) + out := out.(*LocalObjectReference) + *out = *in + return nil + } +} + +// DeepCopy_api_LocalVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_api_LocalVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalVolumeSource) + out := out.(*LocalVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_NFSVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_NFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NFSVolumeSource) + out := out.(*NFSVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_Namespace is an autogenerated deepcopy function. +func DeepCopy_api_Namespace(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Namespace) + out := out.(*Namespace) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_NamespaceList is an autogenerated deepcopy function. +func DeepCopy_api_NamespaceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceList) + out := out.(*NamespaceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + if err := DeepCopy_api_Namespace(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NamespaceSpec is an autogenerated deepcopy function. +func DeepCopy_api_NamespaceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceSpec) + out := out.(*NamespaceSpec) + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_NamespaceStatus is an autogenerated deepcopy function. +func DeepCopy_api_NamespaceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceStatus) + out := out.(*NamespaceStatus) + *out = *in + return nil + } +} + +// DeepCopy_api_Node is an autogenerated deepcopy function. +func DeepCopy_api_Node(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Node) + out := out.(*Node) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_NodeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_NodeStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_NodeAddress is an autogenerated deepcopy function. +func DeepCopy_api_NodeAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAddress) + out := out.(*NodeAddress) + *out = *in + return nil + } +} + +// DeepCopy_api_NodeAffinity is an autogenerated deepcopy function. 
+func DeepCopy_api_NodeAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAffinity) + out := out.(*NodeAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + if err := DeepCopy_api_NodeSelector(*in, *out, c); err != nil { + return err + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NodeCondition is an autogenerated deepcopy function. +func DeepCopy_api_NodeCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeCondition) + out := out.(*NodeCondition) + *out = *in + out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_api_NodeDaemonEndpoints is an autogenerated deepcopy function. +func DeepCopy_api_NodeDaemonEndpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeDaemonEndpoints) + out := out.(*NodeDaemonEndpoints) + *out = *in + return nil + } +} + +// DeepCopy_api_NodeList is an autogenerated deepcopy function. +func DeepCopy_api_NodeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeList) + out := out.(*NodeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + if err := DeepCopy_api_Node(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NodeProxyOptions is an autogenerated deepcopy function. +func DeepCopy_api_NodeProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeProxyOptions) + out := out.(*NodeProxyOptions) + *out = *in + return nil + } +} + +// DeepCopy_api_NodeResources is an autogenerated deepcopy function. +func DeepCopy_api_NodeResources(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeResources) + out := out.(*NodeResources) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_api_NodeSelector is an autogenerated deepcopy function. +func DeepCopy_api_NodeSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelector) + out := out.(*NodeSelector) + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeSelectorTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NodeSelectorRequirement is an autogenerated deepcopy function. 
+func DeepCopy_api_NodeSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorRequirement) + out := out.(*NodeSelectorRequirement) + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_NodeSelectorTerm is an autogenerated deepcopy function. +func DeepCopy_api_NodeSelectorTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorTerm) + out := out.(*NodeSelectorTerm) + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeSelectorRequirement(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NodeSpec is an autogenerated deepcopy function. +func DeepCopy_api_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSpec) + out := out.(*NodeSpec) + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + if err := DeepCopy_api_Taint(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NodeStatus is an autogenerated deepcopy function. +func DeepCopy_api_NodeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeStatus) + out := out.(*NodeStatus) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerImage(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_NodeSystemInfo is an autogenerated deepcopy function. +func DeepCopy_api_NodeSystemInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSystemInfo) + out := out.(*NodeSystemInfo) + *out = *in + return nil + } +} + +// DeepCopy_api_ObjectFieldSelector is an autogenerated deepcopy function. +func DeepCopy_api_ObjectFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectFieldSelector) + out := out.(*ObjectFieldSelector) + *out = *in + return nil + } +} + +// DeepCopy_api_ObjectMeta is an autogenerated deepcopy function. 
+func DeepCopy_api_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMeta) + out := out.(*ObjectMeta) + *out = *in + out.CreationTimestamp = in.CreationTimestamp.DeepCopy() + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]v1.OwnerReference, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*v1.OwnerReference) + } + } + } + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.Initializers) + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ObjectReference is an autogenerated deepcopy function. +func DeepCopy_api_ObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectReference) + out := out.(*ObjectReference) + *out = *in + return nil + } +} + +// DeepCopy_api_PersistentVolume is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolume) + out := out.(*PersistentVolume) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeClaim is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeClaim(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaim) + out := out.(*PersistentVolumeClaim) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeClaimList is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeClaimList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimList) + out := out.(*PersistentVolumeClaimList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := DeepCopy_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeClaimSpec is an autogenerated deepcopy function. 
+func DeepCopy_api_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimSpec) + out := out.(*PersistentVolumeClaimSpec) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeClaimStatus is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeClaimStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimStatus) + out := out.(*PersistentVolumeClaimStatus) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeClaimVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeClaimVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimVolumeSource) + out := out.(*PersistentVolumeClaimVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_PersistentVolumeList is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeList) + out := out.(*PersistentVolumeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := DeepCopy_api_PersistentVolume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSource) + out := out.(*PersistentVolumeSource) + *out = *in + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalVolumeSource) + **out = **in + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(StorageOSPersistentVolumeSource) + if err := DeepCopy_api_StorageOSPersistentVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeSpec is an autogenerated deepcopy function. 
+func DeepCopy_api_PersistentVolumeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSpec) + out := out.(*PersistentVolumeSpec) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if err := DeepCopy_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { + return err + } + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeStatus is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeStatus) + out := out.(*PersistentVolumeStatus) + *out = *in + return nil + } +} + +// DeepCopy_api_PhotonPersistentDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PhotonPersistentDiskVolumeSource) + out := out.(*PhotonPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_Pod is an autogenerated deepcopy function. +func DeepCopy_api_Pod(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Pod) + out := out.(*Pod) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PodAffinity is an autogenerated deepcopy function. +func DeepCopy_api_PodAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinity) + out := out.(*PodAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodAffinityTerm is an autogenerated deepcopy function. 
+func DeepCopy_api_PodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinityTerm) + out := out.(*PodAffinityTerm) + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_PodAntiAffinity is an autogenerated deepcopy function. +func DeepCopy_api_PodAntiAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAntiAffinity) + out := out.(*PodAntiAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodAttachOptions is an autogenerated deepcopy function. +func DeepCopy_api_PodAttachOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAttachOptions) + out := out.(*PodAttachOptions) + *out = *in + return nil + } +} + +// DeepCopy_api_PodCondition is an autogenerated deepcopy function. +func DeepCopy_api_PodCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodCondition) + out := out.(*PodCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_api_PodExecOptions is an autogenerated deepcopy function. +func DeepCopy_api_PodExecOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodExecOptions) + out := out.(*PodExecOptions) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_PodList is an autogenerated deepcopy function. +func DeepCopy_api_PodList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodList) + out := out.(*PodList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := DeepCopy_api_Pod(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodLogOptions is an autogenerated deepcopy function. 
+func DeepCopy_api_PodLogOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodLogOptions) + out := out.(*PodLogOptions) + *out = *in + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_api_PodPortForwardOptions is an autogenerated deepcopy function. +func DeepCopy_api_PodPortForwardOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPortForwardOptions) + out := out.(*PodPortForwardOptions) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_PodProxyOptions is an autogenerated deepcopy function. +func DeepCopy_api_PodProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodProxyOptions) + out := out.(*PodProxyOptions) + *out = *in + return nil + } +} + +// DeepCopy_api_PodSecurityContext is an autogenerated deepcopy function. +func DeepCopy_api_PodSecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityContext) + out := out.(*PodSecurityContext) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(types.UnixUserID) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]types.UnixGroupID, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + *out = new(types.UnixGroupID) + **out = **in + } + return nil + } +} + +// DeepCopy_api_PodSignature is an autogenerated deepcopy function. +func DeepCopy_api_PodSignature(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSignature) + out := out.(*PodSignature) + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.OwnerReference) + } + } + return nil + } +} + +// DeepCopy_api_PodSpec is an autogenerated deepcopy function. 
+func DeepCopy_api_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSpec) + out := out.(*PodSpec) + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + if err := DeepCopy_api_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_api_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_api_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := DeepCopy_api_PodSecurityContext(*in, *out, c); err != nil { + return err + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(Affinity) + if err := DeepCopy_api_Affinity(*in, *out, c); err != nil { + return err + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + if err := DeepCopy_api_Toleration(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + if err := DeepCopy_api_HostAlias(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodStatus is an autogenerated deepcopy function. 
+func DeepCopy_api_PodStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatus) + out := out.(*PodStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodStatusResult is an autogenerated deepcopy function. +func DeepCopy_api_PodStatusResult(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatusResult) + out := out.(*PodStatusResult) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PodTemplate is an autogenerated deepcopy function. +func DeepCopy_api_PodTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplate) + out := out.(*PodTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PodTemplateList is an autogenerated deepcopy function. +func DeepCopy_api_PodTemplateList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateList) + out := out.(*PodTemplateList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodTemplate(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodTemplateSpec is an autogenerated deepcopy function. +func DeepCopy_api_PodTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateSpec) + out := out.(*PodTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PortworxVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PortworxVolumeSource) + out := out.(*PortworxVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_Preconditions is an autogenerated deepcopy function. 
+func DeepCopy_api_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Preconditions) + out := out.(*Preconditions) + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return nil + } +} + +// DeepCopy_api_PreferAvoidPodsEntry is an autogenerated deepcopy function. +func DeepCopy_api_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferAvoidPodsEntry) + out := out.(*PreferAvoidPodsEntry) + *out = *in + if err := DeepCopy_api_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { + return err + } + out.EvictionTime = in.EvictionTime.DeepCopy() + return nil + } +} + +// DeepCopy_api_PreferredSchedulingTerm is an autogenerated deepcopy function. +func DeepCopy_api_PreferredSchedulingTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferredSchedulingTerm) + out := out.(*PreferredSchedulingTerm) + *out = *in + if err := DeepCopy_api_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_Probe is an autogenerated deepcopy function. +func DeepCopy_api_Probe(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Probe) + out := out.(*Probe) + *out = *in + if err := DeepCopy_api_Handler(&in.Handler, &out.Handler, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_ProjectedVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProjectedVolumeSource) + out := out.(*ProjectedVolumeSource) + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + if err := DeepCopy_api_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_api_QuobyteVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_QuobyteVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*QuobyteVolumeSource) + out := out.(*QuobyteVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_RBDVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_RBDVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RBDVolumeSource) + out := out.(*RBDVolumeSource) + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_RangeAllocation is an autogenerated deepcopy function. +func DeepCopy_api_RangeAllocation(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RangeAllocation) + out := out.(*RangeAllocation) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ReplicationController is an autogenerated deepcopy function. 
+func DeepCopy_api_ReplicationController(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationController) + out := out.(*ReplicationController) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ReplicationControllerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_ReplicationControllerCondition is an autogenerated deepcopy function. +func DeepCopy_api_ReplicationControllerCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerCondition) + out := out.(*ReplicationControllerCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_api_ReplicationControllerList is an autogenerated deepcopy function. +func DeepCopy_api_ReplicationControllerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerList) + out := out.(*ReplicationControllerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := DeepCopy_api_ReplicationController(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ReplicationControllerSpec is an autogenerated deepcopy function. +func DeepCopy_api_ReplicationControllerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerSpec) + out := out.(*ReplicationControllerSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + if err := DeepCopy_api_PodTemplateSpec(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_ReplicationControllerStatus is an autogenerated deepcopy function. +func DeepCopy_api_ReplicationControllerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerStatus) + out := out.(*ReplicationControllerStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_ReplicationControllerCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ResourceFieldSelector is an autogenerated deepcopy function. +func DeepCopy_api_ResourceFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceFieldSelector) + out := out.(*ResourceFieldSelector) + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return nil + } +} + +// DeepCopy_api_ResourceQuota is an autogenerated deepcopy function. 
+func DeepCopy_api_ResourceQuota(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuota) + out := out.(*ResourceQuota) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ResourceQuotaStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_ResourceQuotaList is an autogenerated deepcopy function. +func DeepCopy_api_ResourceQuotaList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaList) + out := out.(*ResourceQuotaList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + if err := DeepCopy_api_ResourceQuota(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ResourceQuotaSpec is an autogenerated deepcopy function. +func DeepCopy_api_ResourceQuotaSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaSpec) + out := out.(*ResourceQuotaSpec) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ResourceQuotaStatus is an autogenerated deepcopy function. +func DeepCopy_api_ResourceQuotaStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaStatus) + out := out.(*ResourceQuotaStatus) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_api_ResourceRequirements is an autogenerated deepcopy function. +func DeepCopy_api_ResourceRequirements(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceRequirements) + out := out.(*ResourceRequirements) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_api_SELinuxOptions is an autogenerated deepcopy function. +func DeepCopy_api_SELinuxOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxOptions) + out := out.(*SELinuxOptions) + *out = *in + return nil + } +} + +// DeepCopy_api_ScaleIOVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleIOVolumeSource) + out := out.(*ScaleIOVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_Secret is an autogenerated deepcopy function. 
+func DeepCopy_api_Secret(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Secret) + out := out.(*Secret) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*[]byte) + } + } + } + return nil + } +} + +// DeepCopy_api_SecretEnvSource is an autogenerated deepcopy function. +func DeepCopy_api_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretEnvSource) + out := out.(*SecretEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_SecretKeySelector is an autogenerated deepcopy function. +func DeepCopy_api_SecretKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretKeySelector) + out := out.(*SecretKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_SecretList is an autogenerated deepcopy function. +func DeepCopy_api_SecretList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretList) + out := out.(*SecretList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := DeepCopy_api_Secret(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_SecretProjection is an autogenerated deepcopy function. +func DeepCopy_api_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretProjection) + out := out.(*SecretProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_SecretVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_SecretVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretVolumeSource) + out := out.(*SecretVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_SecurityContext is an autogenerated deepcopy function. 
+func DeepCopy_api_SecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecurityContext) + out := out.(*SecurityContext) + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + if err := DeepCopy_api_Capabilities(*in, *out, c); err != nil { + return err + } + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(types.UnixUserID) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_SerializedReference is an autogenerated deepcopy function. +func DeepCopy_api_SerializedReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SerializedReference) + out := out.(*SerializedReference) + *out = *in + return nil + } +} + +// DeepCopy_api_Service is an autogenerated deepcopy function. +func DeepCopy_api_Service(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Service) + out := out.(*Service) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ServiceStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_ServiceAccount is an autogenerated deepcopy function. +func DeepCopy_api_ServiceAccount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccount) + out := out.(*ServiceAccount) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_ServiceAccountList is an autogenerated deepcopy function. +func DeepCopy_api_ServiceAccountList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccountList) + out := out.(*ServiceAccountList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + if err := DeepCopy_api_ServiceAccount(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ServiceList is an autogenerated deepcopy function. 
+func DeepCopy_api_ServiceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceList) + out := out.(*ServiceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := DeepCopy_api_Service(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ServicePort is an autogenerated deepcopy function. +func DeepCopy_api_ServicePort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServicePort) + out := out.(*ServicePort) + *out = *in + return nil + } +} + +// DeepCopy_api_ServiceProxyOptions is an autogenerated deepcopy function. +func DeepCopy_api_ServiceProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceProxyOptions) + out := out.(*ServiceProxyOptions) + *out = *in + return nil + } +} + +// DeepCopy_api_ServiceSpec is an autogenerated deepcopy function. +func DeepCopy_api_ServiceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceSpec) + out := out.(*ServiceSpec) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ServiceStatus is an autogenerated deepcopy function. +func DeepCopy_api_ServiceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceStatus) + out := out.(*ServiceStatus) + *out = *in + if err := DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_StorageOSPersistentVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_StorageOSPersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageOSPersistentVolumeSource) + out := out.(*StorageOSPersistentVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_StorageOSVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_StorageOSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageOSVolumeSource) + out := out.(*StorageOSVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_Sysctl is an autogenerated deepcopy function. +func DeepCopy_api_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Sysctl) + out := out.(*Sysctl) + *out = *in + return nil + } +} + +// DeepCopy_api_TCPSocketAction is an autogenerated deepcopy function. 
+func DeepCopy_api_TCPSocketAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TCPSocketAction) + out := out.(*TCPSocketAction) + *out = *in + return nil + } +} + +// DeepCopy_api_Taint is an autogenerated deepcopy function. +func DeepCopy_api_Taint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Taint) + out := out.(*Taint) + *out = *in + out.TimeAdded = in.TimeAdded.DeepCopy() + return nil + } +} + +// DeepCopy_api_Toleration is an autogenerated deepcopy function. +func DeepCopy_api_Toleration(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Toleration) + out := out.(*Toleration) + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_api_Volume is an autogenerated deepcopy function. +func DeepCopy_api_Volume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Volume) + out := out.(*Volume) + *out = *in + if err := DeepCopy_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_VolumeMount is an autogenerated deepcopy function. +func DeepCopy_api_VolumeMount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeMount) + out := out.(*VolumeMount) + *out = *in + return nil + } +} + +// DeepCopy_api_VolumeProjection is an autogenerated deepcopy function. +func DeepCopy_api_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeProjection) + out := out.(*VolumeProjection) + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + if err := DeepCopy_api_SecretProjection(*in, *out, c); err != nil { + return err + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + if err := DeepCopy_api_DownwardAPIProjection(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + if err := DeepCopy_api_ConfigMapProjection(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_VolumeSource is an autogenerated deepcopy function. 
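The generated functions above all follow one pattern: assert the concrete type, shallow-copy the struct, then re-allocate every pointer, slice, or map field so the copy shares no memory with the original. A minimal hand-written sketch of that pattern, using a hypothetical `Widget` type rather than any real API object:

```go
package main

import "fmt"

// Widget is a hypothetical type with the field kinds the generated
// deepcopy functions have to handle: a value, a pointer, and a slice.
type Widget struct {
	Name     string
	Replicas *int32
	Tags     []string
}

// deepCopyWidget mirrors the generated pattern: shallow copy first, then
// re-allocate every reference field so the copy is fully independent.
func deepCopyWidget(in, out *Widget) {
	*out = *in // copies all value fields, aliases the reference fields
	if in.Replicas != nil {
		out.Replicas = new(int32)
		*out.Replicas = *in.Replicas
	}
	if in.Tags != nil {
		out.Tags = make([]string, len(in.Tags))
		copy(out.Tags, in.Tags)
	}
}

func main() {
	r := int32(3)
	src := Widget{Name: "demo", Replicas: &r, Tags: []string{"a", "b"}}
	var dst Widget
	deepCopyWidget(&src, &dst)
	*src.Replicas = 5 // mutating the source must not affect the copy
	fmt.Println(*dst.Replicas, dst.Tags) // 3 [a b]
}
```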
+func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeSource) + out := out.(*VolumeSource) + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + if err := DeepCopy_api_EmptyDirVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + if err := DeepCopy_api_SecretVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + if err := DeepCopy_api_DownwardAPIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + if err := DeepCopy_api_ConfigMapVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = 
new(AzureDiskVolumeSource) + if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + if err := DeepCopy_api_ProjectedVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(StorageOSVolumeSource) + if err := DeepCopy_api_StorageOSVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_VsphereVirtualDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_VsphereVirtualDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VsphereVirtualDiskVolumeSource) + out := out.(*VsphereVirtualDiskVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_WeightedPodAffinityTerm is an autogenerated deepcopy function. +func DeepCopy_api_WeightedPodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WeightedPodAffinityTerm) + out := out.(*WeightedPodAffinityTerm) + *out = *in + if err := DeepCopy_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go b/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go new file mode 100644 index 000000000..87edee41c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package extensions diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go b/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go new file mode 100644 index 000000000..27d3e23ad --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package extensions + +import ( + "strings" +) + +// SysctlsFromPodSecurityPolicyAnnotation parses an annotation value of the key +// SysctlsSecurityPolocyAnnotationKey into a slice of sysctls. An empty slice +// is returned if annotation is the empty string. +func SysctlsFromPodSecurityPolicyAnnotation(annotation string) ([]string, error) { + if len(annotation) == 0 { + return []string{}, nil + } + + return strings.Split(annotation, ","), nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. +func PodAnnotationsFromSysctls(sysctls []string) string { + return strings.Join(sysctls, ",") +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/register.go b/vendor/k8s.io/client-go/pkg/apis/extensions/register.go new file mode 100644 index 000000000..5983636c2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/register.go @@ -0,0 +1,70 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package extensions + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "extensions" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this gets cleaned up when the types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, + &ReplicationControllerDummy{}, + &Scale{}, + &ThirdPartyResource{}, + &ThirdPartyResourceList{}, + &DaemonSetList{}, + &DaemonSet{}, + &ThirdPartyResourceData{}, + &ThirdPartyResourceDataList{}, + &Ingress{}, + &IngressList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &PodSecurityPolicy{}, + &PodSecurityPolicyList{}, + &NetworkPolicy{}, + &NetworkPolicyList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/types.go b/vendor/k8s.io/client-go/pkg/apis/extensions/types.go new file mode 100644 index 000000000..161158dcb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/types.go @@ -0,0 +1,1152 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This file (together with pkg/apis/extensions/v1beta1/types.go) contain the experimental +types in kubernetes. These API objects are experimental, meaning that the +APIs may be broken at any time by the kubernetes team. + +DISCLAIMER: The implementation of the experimental API group itself is +a temporary one meant as a stopgap solution until kubernetes has proper +support for multiple API groups. The transition may require changes +beyond registration differences. In other words, experimental API group +support is experimental. +*/ + +package extensions + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/api" +) + +const ( + // SysctlsPodSecurityPolicyAnnotationKey represents the key of a whitelist of + // allowed safe and unsafe sysctls in a pod spec. It's a comma-separated list of plain sysctl + // names or sysctl patterns (which end in *). The string "*" matches all sysctls. + SysctlsPodSecurityPolicyAnnotationKey string = "security.alpha.kubernetes.io/sysctls" +) + +// describes the attributes of a scale subresource +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector +} + +// +genclient=true +// +noMethods=true + +// represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// Dummy definition +type ReplicationControllerDummy struct { + metav1.TypeMeta +} + +// Alpha-level support for Custom Metrics in HPA (as annotations). +type CustomMetricTarget struct { + // Custom Metric name. + Name string + // Custom Metric value (average). + TargetValue resource.Quantity +} + +type CustomMetricTargetList struct { + Items []CustomMetricTarget +} + +type CustomMetricCurrentStatus struct { + // Custom Metric name. + Name string + // Custom Metric value (average). + CurrentValue resource.Quantity +} + +type CustomMetricCurrentStatusList struct { + Items []CustomMetricCurrentStatus +} + +// +genclient=true +// +nonNamespaced=true + +// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource +// types to the API. 
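The helpers.go added above stores the sysctl whitelist as a plain comma-separated string under the `security.alpha.kubernetes.io/sysctls` annotation key defined here. A standalone sketch of that round trip, using only the standard library (the sysctl names are made up for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Join a whitelist of sysctl patterns into the annotation value,
	// mirroring PodAnnotationsFromSysctls.
	sysctls := []string{"kernel.shm_rmid_forced", "net.*"}
	annotation := strings.Join(sysctls, ",")
	fmt.Println(annotation) // kernel.shm_rmid_forced,net.*

	// Parse it back, mirroring SysctlsFromPodSecurityPolicyAnnotation:
	// an empty annotation yields an empty slice, otherwise split on commas.
	parsed := []string{}
	if len(annotation) > 0 {
		parsed = strings.Split(annotation, ",")
	}
	fmt.Println(parsed)
}
```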
It consists of one or more Versions of the api. +type ThirdPartyResource struct { + metav1.TypeMeta + + // Standard object metadata + // +optional + metav1.ObjectMeta + + // Description is the description of this object. + // +optional + Description string + + // Versions are versions for this third party object + Versions []APIVersion +} + +type ThirdPartyResourceList struct { + metav1.TypeMeta + + // Standard list metadata. + // +optional + metav1.ListMeta + + // Items is the list of horizontal pod autoscalers. + Items []ThirdPartyResource +} + +// An APIVersion represents a single concrete version of an object model. +// TODO: we should consider merge this struct with GroupVersion in metav1.go +type APIVersion struct { + // Name of this version (e.g. 'v1'). + Name string +} + +// An internal object, used for versioned storage in etcd. Not exposed to the end user. +type ThirdPartyResourceData struct { + metav1.TypeMeta + // Standard object metadata. + // +optional + metav1.ObjectMeta + + // Data is the raw JSON data for this data. + // +optional + Data []byte +} + +// +genclient=true + +type Deployment struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus +} + +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas int32 + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector + + // Template describes the pods that will be created. + Template api.PodTemplateSpec + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + RevisionHistoryLimit *int32 + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + Paused bool + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. Note that progress will not be estimated during the time a + // deployment is paused. This is not set by default. + ProgressDeadlineSeconds *int32 +} + +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta + // Required: This must match the Name of a deployment. 
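The register.go added earlier in this hunk exposes `AddToScheme`, which a caller uses to make these internal extensions types known to a `runtime.Scheme` before converting or serializing them. A minimal sketch of that wiring, assuming the import paths vendored by this change:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	// Build an empty scheme and register the extensions group's internal types.
	scheme := runtime.NewScheme()
	if err := extensions.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme now recognizes the kinds listed in addKnownTypes, e.g. Deployment.
	gvk := extensions.SchemeGroupVersion.WithKind("Deployment")
	fmt.Println(scheme.Recognizes(gvk)) // true
}
```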
+ Name string + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string + // The config of this deployment rollback. + RollbackTo RollbackConfig +} + +type RollbackConfig struct { + // The revision to rollback to. If set to 0, rollback to the last revision. + // +optional + Revision int64 +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that at least 70% of original number of pods are available at all times + // during the update. + // +optional + MaxUnavailable intstr.IntOrString + + // The maximum number of pods that can be scheduled above the original number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of total pods at + // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of original pods. + // +optional + MaxSurge intstr.IntOrString +} + +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 + + // Total number of ready pods targeted by this deployment. 
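`MaxUnavailable` and `MaxSurge` above are `intstr.IntOrString` values, so a caller supplies either an absolute pod count or a percentage. A short sketch of filling in the strategy, assuming the vendored import paths in this diff (the 30% and 1 are just illustrative values):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	// Allow 30% of the original pods to be unavailable during the rollout
	// while surging by at most one pod above the desired replica count.
	strategy := extensions.DeploymentStrategy{
		Type: extensions.RollingUpdateDeploymentStrategyType,
		RollingUpdate: &extensions.RollingUpdateDeployment{
			MaxUnavailable: intstr.FromString("30%"),
			MaxSurge:       intstr.FromInt(1),
		},
	}
	fmt.Println(strategy.Type, strategy.RollingUpdate.MaxUnavailable.String())
}
```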
+ // +optional + ReadyReplicas int32 + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 + + // Total number of unavailable pods targeted by this deployment. + // +optional + UnavailableReplicas int32 + + // Represents the latest available observations of a deployment's current state. + Conditions []DeploymentCondition + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int64 +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // The last time this condition was updated. + LastUpdateTime metav1.Time + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. + Message string +} + +type DeploymentList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of deployments. + Items []Deployment +} + +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + Type DaemonSetUpdateStrategyType + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). 
Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable intstr.IntOrString +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + Template api.PodTemplateSpec + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 + + // DEPRECATED. + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + TemplateGeneration int64 + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + CurrentNumberScheduled int32 + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + NumberMisscheduled int32 + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + DesiredNumberScheduled int32 + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 + + // The most recent generation observed by the daemon set controller. 
+ // +optional + ObservedGeneration int64 + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int64 +} + +// +genclient=true + +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus +} + +const ( + // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" +) + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // A list of daemon sets. + Items []DaemonSet +} + +type ThirdPartyResourceDataList struct { + metav1.TypeMeta + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + // Items is a list of third party objects + Items []ThirdPartyResourceData +} + +// +genclient=true + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +type Ingress struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is the desired state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec IngressSpec + + // Status is the current state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status IngressStatus +} + +// IngressList is a collection of Ingress. +type IngressList struct { + metav1.TypeMeta + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // Items is the list of Ingress. + Items []Ingress +} + +// IngressSpec describes the Ingress the user wishes to exist. +type IngressSpec struct { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + Backend *IngressBackend + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + TLS []IngressTLS + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + Rules []IngressRule + // TODO: Add the ability to specify load-balancer IP through claims +} + +// IngressTLS describes the transport layer security associated with an Ingress. +type IngressTLS struct { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + Hosts []string + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + SecretName string + // TODO: Consider specifying different modes of termination, protocols etc. +} + +// IngressStatus describe the current state of the Ingress. +type IngressStatus struct { + // LoadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer api.LoadBalancerStatus +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +type IngressRule struct { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. + // +optional + Host string + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. 
Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + IngressRuleValue +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +type IngressRuleValue struct { + //TODO: + // 1. Consider renaming this resource and the associated rules so they + // aren't tied to Ingress. They can be used to route intra-cluster traffic. + // 2. Consider adding fields for ingress-type specific global options + // usable by a loadbalancer, like http keep-alive. + + // +optional + HTTP *HTTPIngressRuleValue +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +type HTTPIngressRuleValue struct { + // A collection of paths that map requests to backends. + Paths []HTTPIngressPath + // TODO: Consider adding fields for ingress-type specific global + // options usable by a loadbalancer, like http keep-alive. +} + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +type HTTPIngressPath struct { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + Path string + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + Backend IngressBackend +} + +// IngressBackend describes all endpoints for a given service and port. +type IngressBackend struct { + // Specifies the name of the referenced service. + ServiceName string + + // Specifies the port of the referenced service. + ServicePort intstr.IntOrString +} + +// +genclient=true + +// ReplicaSet represents the configuration of a replica set. +type ReplicaSet struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this ReplicaSet. + // +optional + Spec ReplicaSetSpec + + // Status is the current status of this ReplicaSet. This data may be + // out of date by some window of time. + // +optional + Status ReplicaSetStatus +} + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicaSet +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +// As the internal representation of a ReplicaSet, it must have +// a Template set. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. 
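The Ingress types above compose host rules, HTTP paths, and a service backend. A sketch of how a single-rule spec fits together, with placeholder host, path, and service names:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	// Route http://demo.example.com/app to port 80 of a service named "app-svc".
	spec := extensions.IngressSpec{
		Rules: []extensions.IngressRule{{
			Host: "demo.example.com",
			IngressRuleValue: extensions.IngressRuleValue{
				HTTP: &extensions.HTTPIngressRuleValue{
					Paths: []extensions.HTTPIngressPath{{
						Path: "/app",
						Backend: extensions.IngressBackend{
							ServiceName: "app-svc",
							ServicePort: intstr.FromInt(80),
						},
					}},
				},
			},
		}},
	}
	fmt.Println(spec.Rules[0].Host, spec.Rules[0].HTTP.Paths[0].Backend.ServiceName)
}
```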
+ // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the replica count. + // Must match in order to be controlled. + // If empty, defaulted to labels on pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // +optional + Template api.PodTemplateSpec +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replica set's current state. + // +optional + Conditions []ReplicaSetCondition +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. +type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + // +optional + Reason string + // A human readable message indicating details about the transition. + // +optional + Message string +} + +// +genclient=true +// +nonNamespaced=true + +// PodSecurityPolicy governs the ability to make requests that affect the SecurityContext +// that will be applied to a pod and container. +type PodSecurityPolicy struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the policy enforced. + // +optional + Spec PodSecurityPolicySpec +} + +// PodSecurityPolicySpec defines the policy enforced. +type PodSecurityPolicySpec struct { + // Privileged determines if a pod can request to be run as privileged. + // +optional + Privileged bool + // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. + // +optional + DefaultAddCapabilities []api.Capability + // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. 
+ // +optional + RequiredDropCapabilities []api.Capability + // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // +optional + AllowedCapabilities []api.Capability + // Volumes is a white list of allowed volume plugins. Empty indicates that all plugins + // may be used. + // +optional + Volumes []FSType + // HostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + HostNetwork bool + // HostPorts determines which host port ranges are allowed to be exposed. + // +optional + HostPorts []HostPortRange + // HostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + HostPID bool + // HostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + HostIPC bool + // SELinux is the strategy that will dictate the allowable labels that may be set. + SELinux SELinuxStrategyOptions + // RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + RunAsUser RunAsUserStrategyOptions + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + ReadOnlyRootFilesystem bool + // AllowedHostPaths is a white list of allowed host path prefixes. Empty indicates that all + // host paths may be used. + // +optional + AllowedHostPaths []string +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +type HostPortRange struct { + // Min is the start of the range, inclusive. + Min int + // Max is the end of the range, inclusive. + Max int +} + +// FSType gives strong typing to different file systems that are used by volumes. 
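
The PodSecurityPolicySpec fields above combine capability lists, a volume whitelist and the various strategy options. A hedged sketch of a restrictive policy built from these internal types follows (the policy name, capability choices and values are illustrative only; the volume whitelist uses the FSType constants declared just below):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/client-go/pkg/api"
	extensions "k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	psp := &extensions.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "restricted-example"},
		Spec: extensions.PodSecurityPolicySpec{
			Privileged:               false,
			RequiredDropCapabilities: []api.Capability{"NET_RAW", "SYS_ADMIN"},
			// Only a small set of volume plugins is whitelisted; leaving
			// Volumes empty would instead allow every plugin.
			Volumes: []extensions.FSType{
				extensions.EmptyDir,
				extensions.ConfigMap,
				extensions.Secret,
				extensions.PersistentVolumeClaim,
			},
			HostNetwork: false,
			HostPorts:   []extensions.HostPortRange{{Min: 8000, Max: 8080}},
			SELinux:     extensions.SELinuxStrategyOptions{Rule: extensions.SELinuxStrategyRunAsAny},
			RunAsUser: extensions.RunAsUserStrategyOptions{
				Rule: extensions.RunAsUserStrategyMustRunAsNonRoot,
			},
			SupplementalGroups: extensions.SupplementalGroupsStrategyOptions{
				Rule: extensions.SupplementalGroupsStrategyRunAsAny,
			},
			FSGroup:                extensions.FSGroupStrategyOptions{Rule: extensions.FSGroupStrategyRunAsAny},
			ReadOnlyRootFilesystem: true,
			AllowedHostPaths:       []string{"/var/log/example"},
		},
	}
	fmt.Printf("policy %q drops %v and allows host ports %v\n",
		psp.Name, psp.Spec.RequiredDropCapabilities, psp.Spec.HostPorts)
}
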
+type FSType string + +var ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" + HostPath FSType = "hostPath" + EmptyDir FSType = "emptyDir" + GCEPersistentDisk FSType = "gcePersistentDisk" + AWSElasticBlockStore FSType = "awsElasticBlockStore" + GitRepo FSType = "gitRepo" + Secret FSType = "secret" + NFS FSType = "nfs" + ISCSI FSType = "iscsi" + Glusterfs FSType = "glusterfs" + PersistentVolumeClaim FSType = "persistentVolumeClaim" + RBD FSType = "rbd" + Cinder FSType = "cinder" + CephFS FSType = "cephFS" + DownwardAPI FSType = "downwardAPI" + FC FSType = "fc" + ConfigMap FSType = "configMap" + VsphereVolume FSType = "vsphereVolume" + Quobyte FSType = "quobyte" + AzureDisk FSType = "azureDisk" + PhotonPersistentDisk FSType = "photonPersistentDisk" + StorageOS FSType = "storageos" + Projected FSType = "projected" + PortworxVolume FSType = "portworxVolume" + ScaleIO FSType = "scaleIO" + All FSType = "*" +) + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +type SELinuxStrategyOptions struct { + // Rule is the strategy that will dictate the allowable labels that may be set. + Rule SELinuxStrategy + // seLinuxOptions required to run as; required for MustRunAs + // More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md + // +optional + SELinuxOptions *api.SELinuxOptions +} + +// SELinuxStrategy denotes strategy types for generating SELinux options for a +// Security. +type SELinuxStrategy string + +const ( + // container must have SELinux labels of X applied. + SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" + // container may make requests for any SELinux context labels. + SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" +) + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsUserStrategyOptions struct { + // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. + Rule RunAsUserStrategy + // Ranges are the allowed ranges of uids that may be used. + // +optional + Ranges []UserIDRange +} + +// UserIDRange provides a min/max of an allowed range of UserIDs. +type UserIDRange struct { + // Min is the start of the range, inclusive. + Min types.UnixUserID + // Max is the end of the range, inclusive. + Max types.UnixUserID +} + +// GroupIDRange provides a min/max of an allowed range of GroupIDs. +type GroupIDRange struct { + // Min is the start of the range, inclusive. + Min types.UnixGroupID + // Max is the end of the range, inclusive. + Max types.UnixGroupID +} + +// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a +// SecurityContext. +type RunAsUserStrategy string + +const ( + // container must run as a particular uid. + RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" + // container must run as a non-root uid + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" + // container may make requests for any uid. + RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" +) + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +type FSGroupStrategyOptions struct { + // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + Rule FSGroupStrategyType + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. 
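
The MustRunAs and RunAsAny strategies above are interpreted against their accompanying ranges. The following sketch shows one plausible reading of those semantics for RunAsUserStrategyOptions; it is a simplification, not the real PSP admission logic, which also handles MustRunAsNonRoot and defaulting:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	extensions "k8s.io/client-go/pkg/apis/extensions"
)

// uidAllowed is a simplified check: RunAsAny accepts everything, while
// MustRunAs requires the uid to fall inside one of the inclusive ranges.
func uidAllowed(opts extensions.RunAsUserStrategyOptions, uid types.UnixUserID) bool {
	switch opts.Rule {
	case extensions.RunAsUserStrategyRunAsAny:
		return true
	case extensions.RunAsUserStrategyMustRunAs:
		for _, r := range opts.Ranges {
			if uid >= r.Min && uid <= r.Max {
				return true
			}
		}
	}
	return false
}

func main() {
	opts := extensions.RunAsUserStrategyOptions{
		Rule:   extensions.RunAsUserStrategyMustRunAs,
		Ranges: []extensions.UserIDRange{{Min: 1000, Max: 1999}},
	}
	fmt.Println(uidAllowed(opts, 1500)) // true
	fmt.Println(uidAllowed(opts, 0))    // false
}
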
+ // +optional + Ranges []GroupIDRange +} + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +type FSGroupStrategyType string + +const ( + // container must have FSGroup of X applied. + FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" +) + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + Rule SupplementalGroupsStrategyType + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + // +optional + Ranges []GroupIDRange +} + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +const ( + // container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // container may make requests for any gid. + SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +type PodSecurityPolicyList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodSecurityPolicy +} + +// +genclient=true + +// NetworkPolicy describes what network traffic is allowed for a set of Pods +type NetworkPolicy struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + Spec NetworkPolicySpec +} + +type NetworkPolicySpec struct { + // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + PodSelector metav1.LabelSelector + + // List of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not allow any traffic + // (and serves solely to ensure that the pods it selects are isolated by default). + // +optional + Ingress []NetworkPolicyIngressRule +} + +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +type NetworkPolicyIngressRule struct { + // List of ports which should be made accessible on the pods selected for this rule. + // Each item in this list is combined using a logical OR. + // If this field is empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. 
+ // +optional + Ports []NetworkPolicyPort + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). + // If this field is present and contains at least on item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // +optional + From []NetworkPolicyPeer +} + +type NetworkPolicyPort struct { + // Optional. The protocol (TCP or UDP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + Protocol *api.Protocol + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched. + // +optional + Port *intstr.IntOrString +} + +type NetworkPolicyPeer struct { + // Exactly one of the following must be specified. + + // This is a label selector which selects Pods in this namespace. + // This field follows standard label selector semantics. + // If present but empty, this selector selects all pods in this namespace. + // +optional + PodSelector *metav1.LabelSelector + + // Selects Namespaces using cluster scoped-labels. This + // matches all pods in all namespaces selected by this label selector. + // This field follows standard label selector semantics. + // If present but empty, this selector selects all namespaces. + // +optional + NamespaceSelector *metav1.LabelSelector +} + +// NetworkPolicyList is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []NetworkPolicy +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go new file mode 100644 index 000000000..48243b9d1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go @@ -0,0 +1,1151 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package extensions + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
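
Pulling the NetworkPolicy types above together, here is a hedged sketch of a policy that isolates pods labelled app=db and only admits TCP/5432 traffic from pods labelled app=web; the names, namespace, labels and port are invented for the example:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	api "k8s.io/client-go/pkg/api"
	extensions "k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	tcp := api.Protocol("TCP")
	port := intstr.FromInt(5432)

	policy := &extensions.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "db-allow-web", Namespace: "prod"},
		Spec: extensions.NetworkPolicySpec{
			// The policy applies to every pod labelled app=db in this namespace.
			PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": "db"}},
			// A single ingress rule: traffic is admitted only when it matches
			// at least one entry in Ports AND at least one entry in From.
			Ingress: []extensions.NetworkPolicyIngressRule{{
				Ports: []extensions.NetworkPolicyPort{{Protocol: &tcp, Port: &port}},
				From: []extensions.NetworkPolicyPeer{{
					PodSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
				}},
			}},
		},
	}
	fmt.Printf("%s selects %v and carries %d ingress rule(s)\n",
		policy.Name, policy.Spec.PodSelector.MatchLabels, len(policy.Spec.Ingress))
}
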
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_APIVersion, InType: reflect.TypeOf(&APIVersion{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricCurrentStatus, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricCurrentStatusList, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricTarget, InType: reflect.TypeOf(&CustomMetricTarget{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricTargetList, InType: reflect.TypeOf(&CustomMetricTargetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSet, InType: reflect.TypeOf(&DaemonSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Deployment, InType: reflect.TypeOf(&Deployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_FSGroupStrategyOptions, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_GroupIDRange, InType: reflect.TypeOf(&GroupIDRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HTTPIngressPath, InType: reflect.TypeOf(&HTTPIngressPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HTTPIngressRuleValue, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HostPortRange, InType: reflect.TypeOf(&HostPortRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Ingress, InType: reflect.TypeOf(&Ingress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressBackend, InType: reflect.TypeOf(&IngressBackend{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressList, InType: reflect.TypeOf(&IngressList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressRule, InType: reflect.TypeOf(&IngressRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressRuleValue, InType: reflect.TypeOf(&IngressRuleValue{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressSpec, InType: reflect.TypeOf(&IngressSpec{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressStatus, InType: reflect.TypeOf(&IngressStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressTLS, InType: reflect.TypeOf(&IngressTLS{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicy, InType: reflect.TypeOf(&NetworkPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyIngressRule, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyList, InType: reflect.TypeOf(&NetworkPolicyList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyPeer, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyPort, InType: reflect.TypeOf(&NetworkPolicyPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicySpec, InType: reflect.TypeOf(&NetworkPolicySpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicy, InType: reflect.TypeOf(&PodSecurityPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicyList, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicySpec, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSet, InType: reflect.TypeOf(&ReplicaSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetCondition, InType: reflect.TypeOf(&ReplicaSetCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetList, InType: reflect.TypeOf(&ReplicaSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetSpec, InType: reflect.TypeOf(&ReplicaSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceData, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceDataList, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceList, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_UserIDRange, InType: reflect.TypeOf(&UserIDRange{})}, + ) +} + +// DeepCopy_extensions_APIVersion is an autogenerated deepcopy function. +func DeepCopy_extensions_APIVersion(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIVersion) + out := out.(*APIVersion) + *out = *in + return nil + } +} + +// DeepCopy_extensions_CustomMetricCurrentStatus is an autogenerated deepcopy function. +func DeepCopy_extensions_CustomMetricCurrentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricCurrentStatus) + out := out.(*CustomMetricCurrentStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +// DeepCopy_extensions_CustomMetricCurrentStatusList is an autogenerated deepcopy function. +func DeepCopy_extensions_CustomMetricCurrentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricCurrentStatusList) + out := out.(*CustomMetricCurrentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_CustomMetricTarget is an autogenerated deepcopy function. +func DeepCopy_extensions_CustomMetricTarget(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricTarget) + out := out.(*CustomMetricTarget) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +// DeepCopy_extensions_CustomMetricTargetList is an autogenerated deepcopy function. +func DeepCopy_extensions_CustomMetricTargetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricTargetList) + out := out.(*CustomMetricTargetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricTarget, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_CustomMetricTarget(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_DaemonSet is an autogenerated deepcopy function. +func DeepCopy_extensions_DaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSet) + out := out.(*DaemonSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_DaemonSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_DaemonSetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_extensions_DaemonSetList is an autogenerated deepcopy function. 
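
The init/RegisterDeepCopies wiring above normally runs when the package's scheme is built, so callers rarely touch these functions by hand. As a hedged sketch of what one generated function does, the snippet below copies a CustomMetricTargetList through a bare Cloner; the conversion.NewCloner constructor is assumed from the vendored apimachinery, and real code would go through the scheme instead:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/conversion"
	extensions "k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	in := extensions.CustomMetricTargetList{
		Items: []extensions.CustomMetricTarget{
			{Name: "qps", TargetValue: resource.MustParse("100")},
		},
	}
	out := extensions.CustomMetricTargetList{}

	// The generated function takes interface{} arguments and type-asserts
	// them back to the concrete pointers, as its body above shows.
	if err := extensions.DeepCopy_extensions_CustomMetricTargetList(&in, &out, conversion.NewCloner()); err != nil {
		panic(err)
	}

	// The copy owns its own Items slice, so mutating it leaves `in` intact.
	out.Items[0].Name = "latency"
	fmt.Println(in.Items[0].Name, out.Items[0].Name)
}
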
+func DeepCopy_extensions_DaemonSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetList) + out := out.(*DaemonSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_DaemonSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_DaemonSetSpec is an autogenerated deepcopy function. +func DeepCopy_extensions_DaemonSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetSpec) + out := out.(*DaemonSetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_extensions_DaemonSetStatus is an autogenerated deepcopy function. +func DeepCopy_extensions_DaemonSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetStatus) + out := out.(*DaemonSetStatus) + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_extensions_DaemonSetUpdateStrategy is an autogenerated deepcopy function. +func DeepCopy_extensions_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetUpdateStrategy) + out := out.(*DaemonSetUpdateStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + **out = **in + } + return nil + } +} + +// DeepCopy_extensions_Deployment is an autogenerated deepcopy function. +func DeepCopy_extensions_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Deployment) + out := out.(*Deployment) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_DeploymentStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_extensions_DeploymentCondition is an autogenerated deepcopy function. +func DeepCopy_extensions_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentCondition) + out := out.(*DeploymentCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_extensions_DeploymentList is an autogenerated deepcopy function. 
+func DeepCopy_extensions_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentList) + out := out.(*DeploymentList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_Deployment(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_DeploymentRollback is an autogenerated deepcopy function. +func DeepCopy_extensions_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentRollback) + out := out.(*DeploymentRollback) + *out = *in + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +// DeepCopy_extensions_DeploymentSpec is an autogenerated deepcopy function. +func DeepCopy_extensions_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentSpec) + out := out.(*DeploymentSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_extensions_DeploymentStatus is an autogenerated deepcopy function. +func DeepCopy_extensions_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStatus) + out := out.(*DeploymentStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_extensions_DeploymentStrategy is an autogenerated deepcopy function. +func DeepCopy_extensions_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStrategy) + out := out.(*DeploymentStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + **out = **in + } + return nil + } +} + +// DeepCopy_extensions_FSGroupStrategyOptions is an autogenerated deepcopy function. 
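
Several of the functions above allocate fresh pointers (the `*out = new(int64); **out = **in` pattern) rather than sharing them with the source object. A small sketch of that behaviour, again calling a generated function directly with a bare Cloner purely for illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion"
	extensions "k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	collisions := int64(1)
	in := extensions.DaemonSetStatus{CollisionCount: &collisions}
	out := extensions.DaemonSetStatus{}

	if err := extensions.DeepCopy_extensions_DaemonSetStatus(&in, &out, conversion.NewCloner()); err != nil {
		panic(err)
	}

	// A fresh *int64 was allocated for the copy, so changing it does not
	// reach back into the original status.
	*out.CollisionCount = 7
	fmt.Println(*in.CollisionCount, *out.CollisionCount) // 1 7
}
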
+func DeepCopy_extensions_FSGroupStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FSGroupStrategyOptions) + out := out.(*FSGroupStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]GroupIDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_extensions_GroupIDRange is an autogenerated deepcopy function. +func DeepCopy_extensions_GroupIDRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupIDRange) + out := out.(*GroupIDRange) + *out = *in + return nil + } +} + +// DeepCopy_extensions_HTTPIngressPath is an autogenerated deepcopy function. +func DeepCopy_extensions_HTTPIngressPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPIngressPath) + out := out.(*HTTPIngressPath) + *out = *in + return nil + } +} + +// DeepCopy_extensions_HTTPIngressRuleValue is an autogenerated deepcopy function. +func DeepCopy_extensions_HTTPIngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPIngressRuleValue) + out := out.(*HTTPIngressRuleValue) + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_extensions_HostPortRange is an autogenerated deepcopy function. +func DeepCopy_extensions_HostPortRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPortRange) + out := out.(*HostPortRange) + *out = *in + return nil + } +} + +// DeepCopy_extensions_Ingress is an autogenerated deepcopy function. +func DeepCopy_extensions_Ingress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Ingress) + out := out.(*Ingress) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_IngressSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_IngressStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_extensions_IngressBackend is an autogenerated deepcopy function. +func DeepCopy_extensions_IngressBackend(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressBackend) + out := out.(*IngressBackend) + *out = *in + return nil + } +} + +// DeepCopy_extensions_IngressList is an autogenerated deepcopy function. +func DeepCopy_extensions_IngressList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressList) + out := out.(*IngressList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_Ingress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_IngressRule is an autogenerated deepcopy function. +func DeepCopy_extensions_IngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressRule) + out := out.(*IngressRule) + *out = *in + if err := DeepCopy_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_extensions_IngressRuleValue is an autogenerated deepcopy function. 
+func DeepCopy_extensions_IngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressRuleValue) + out := out.(*IngressRuleValue) + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + if err := DeepCopy_extensions_HTTPIngressRuleValue(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_extensions_IngressSpec is an autogenerated deepcopy function. +func DeepCopy_extensions_IngressSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressSpec) + out := out.(*IngressSpec) + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(IngressBackend) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_IngressTLS(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_IngressRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_IngressStatus is an autogenerated deepcopy function. +func DeepCopy_extensions_IngressStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressStatus) + out := out.(*IngressStatus) + *out = *in + if err := api.DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_extensions_IngressTLS is an autogenerated deepcopy function. +func DeepCopy_extensions_IngressTLS(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressTLS) + out := out.(*IngressTLS) + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_extensions_NetworkPolicy is an autogenerated deepcopy function. +func DeepCopy_extensions_NetworkPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicy) + out := out.(*NetworkPolicy) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_extensions_NetworkPolicyIngressRule is an autogenerated deepcopy function. +func DeepCopy_extensions_NetworkPolicyIngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyIngressRule) + out := out.(*NetworkPolicyIngressRule) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicyPort(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicyPeer(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_NetworkPolicyList is an autogenerated deepcopy function. 
+func DeepCopy_extensions_NetworkPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyList) + out := out.(*NetworkPolicyList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicy(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_NetworkPolicyPeer is an autogenerated deepcopy function. +func DeepCopy_extensions_NetworkPolicyPeer(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyPeer) + out := out.(*NetworkPolicyPeer) + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +// DeepCopy_extensions_NetworkPolicyPort is an autogenerated deepcopy function. +func DeepCopy_extensions_NetworkPolicyPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyPort) + out := out.(*NetworkPolicyPort) + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(api.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +// DeepCopy_extensions_NetworkPolicySpec is an autogenerated deepcopy function. +func DeepCopy_extensions_NetworkPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicySpec) + out := out.(*NetworkPolicySpec) + *out = *in + if newVal, err := c.DeepCopy(&in.PodSelector); err != nil { + return err + } else { + out.PodSelector = *newVal.(*v1.LabelSelector) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_PodSecurityPolicy is an autogenerated deepcopy function. +func DeepCopy_extensions_PodSecurityPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicy) + out := out.(*PodSecurityPolicy) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_extensions_PodSecurityPolicyList is an autogenerated deepcopy function. +func DeepCopy_extensions_PodSecurityPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicyList) + out := out.(*PodSecurityPolicyList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_PodSecurityPolicy(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_PodSecurityPolicySpec is an autogenerated deepcopy function. 
+func DeepCopy_extensions_PodSecurityPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicySpec) + out := out.(*PodSecurityPolicySpec) + *out = *in + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]api.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]api.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]api.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + copy(*out, *in) + } + if err := DeepCopy_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, c); err != nil { + return err + } + if err := DeepCopy_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, c); err != nil { + return err + } + if err := DeepCopy_extensions_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { + return err + } + if err := DeepCopy_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, c); err != nil { + return err + } + if in.AllowedHostPaths != nil { + in, out := &in.AllowedHostPaths, &out.AllowedHostPaths + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_extensions_ReplicaSet is an autogenerated deepcopy function. +func DeepCopy_extensions_ReplicaSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSet) + out := out.(*ReplicaSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_ReplicaSetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_extensions_ReplicaSetCondition is an autogenerated deepcopy function. +func DeepCopy_extensions_ReplicaSetCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetCondition) + out := out.(*ReplicaSetCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_extensions_ReplicaSetList is an autogenerated deepcopy function. +func DeepCopy_extensions_ReplicaSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetList) + out := out.(*ReplicaSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ReplicaSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_ReplicaSetSpec is an autogenerated deepcopy function. 
+func DeepCopy_extensions_ReplicaSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetSpec) + out := out.(*ReplicaSetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_extensions_ReplicaSetStatus is an autogenerated deepcopy function. +func DeepCopy_extensions_ReplicaSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetStatus) + out := out.(*ReplicaSetStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ReplicaSetCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_ReplicationControllerDummy is an autogenerated deepcopy function. +func DeepCopy_extensions_ReplicationControllerDummy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerDummy) + out := out.(*ReplicationControllerDummy) + *out = *in + return nil + } +} + +// DeepCopy_extensions_RollbackConfig is an autogenerated deepcopy function. +func DeepCopy_extensions_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollbackConfig) + out := out.(*RollbackConfig) + *out = *in + return nil + } +} + +// DeepCopy_extensions_RollingUpdateDaemonSet is an autogenerated deepcopy function. +func DeepCopy_extensions_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDaemonSet) + out := out.(*RollingUpdateDaemonSet) + *out = *in + return nil + } +} + +// DeepCopy_extensions_RollingUpdateDeployment is an autogenerated deepcopy function. +func DeepCopy_extensions_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDeployment) + out := out.(*RollingUpdateDeployment) + *out = *in + return nil + } +} + +// DeepCopy_extensions_RunAsUserStrategyOptions is an autogenerated deepcopy function. +func DeepCopy_extensions_RunAsUserStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RunAsUserStrategyOptions) + out := out.(*RunAsUserStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]UserIDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_extensions_SELinuxStrategyOptions is an autogenerated deepcopy function. +func DeepCopy_extensions_SELinuxStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxStrategyOptions) + out := out.(*SELinuxStrategyOptions) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(api.SELinuxOptions) + **out = **in + } + return nil + } +} + +// DeepCopy_extensions_Scale is an autogenerated deepcopy function. 
+func DeepCopy_extensions_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_ScaleStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_extensions_ScaleSpec is an autogenerated deepcopy function. +func DeepCopy_extensions_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +// DeepCopy_extensions_ScaleStatus is an autogenerated deepcopy function. +func DeepCopy_extensions_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := out.(*ScaleStatus) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +// DeepCopy_extensions_SupplementalGroupsStrategyOptions is an autogenerated deepcopy function. +func DeepCopy_extensions_SupplementalGroupsStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SupplementalGroupsStrategyOptions) + out := out.(*SupplementalGroupsStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]GroupIDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_extensions_ThirdPartyResource is an autogenerated deepcopy function. +func DeepCopy_extensions_ThirdPartyResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResource) + out := out.(*ThirdPartyResource) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersion, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_extensions_ThirdPartyResourceData is an autogenerated deepcopy function. +func DeepCopy_extensions_ThirdPartyResourceData(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceData) + out := out.(*ThirdPartyResourceData) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_extensions_ThirdPartyResourceDataList is an autogenerated deepcopy function. +func DeepCopy_extensions_ThirdPartyResourceDataList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceDataList) + out := out.(*ThirdPartyResourceDataList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResourceData, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ThirdPartyResourceData(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_ThirdPartyResourceList is an autogenerated deepcopy function. 
+func DeepCopy_extensions_ThirdPartyResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceList) + out := out.(*ThirdPartyResourceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResource, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ThirdPartyResource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_extensions_UserIDRange is an autogenerated deepcopy function. +func DeepCopy_extensions_UserIDRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*UserIDRange) + out := out.(*UserIDRange) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/util/doc.go b/vendor/k8s.io/client-go/pkg/util/doc.go new file mode 100644 index 000000000..1747db550 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package util implements various utility functions used in both testing and implementation +// of Kubernetes. Package util may not depend on any other package in the Kubernetes +// package tree. +package util diff --git a/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go b/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go new file mode 100644 index 000000000..4e70cc682 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go @@ -0,0 +1,54 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parsers + +import ( + "fmt" + + dockerref "github.com/docker/distribution/reference" +) + +const ( + DefaultImageTag = "latest" +) + +// ParseImageName parses a docker image string into three parts: repo, tag and digest. +// If both tag and digest are empty, a default image tag will be returned. +func ParseImageName(image string) (string, string, string, error) { + named, err := dockerref.ParseNamed(image) + if err != nil { + return "", "", "", fmt.Errorf("couldn't parse image name: %v", err) + } + + repoToPull := named.Name() + var tag, digest string + + tagged, ok := named.(dockerref.Tagged) + if ok { + tag = tagged.Tag() + } + + digested, ok := named.(dockerref.Digested) + if ok { + digest = digested.Digest().String() + } + // If no tag was specified, use the default "latest". 
+ if len(tag) == 0 && len(digest) == 0 { + tag = DefaultImageTag + } + return repoToPull, tag, digest, nil +} diff --git a/vendor/k8s.io/client-go/pkg/util/template.go b/vendor/k8s.io/client-go/pkg/util/template.go new file mode 100644 index 000000000..d09d7dc86 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/template.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "bytes" + "go/doc" + "io" + "strings" + "text/template" +) + +func wrap(indent string, s string) string { + var buf bytes.Buffer + doc.ToText(&buf, s, indent, indent+" ", 80-len(indent)) + return buf.String() +} + +// ExecuteTemplate executes templateText with data and output written to w. +func ExecuteTemplate(w io.Writer, templateText string, data interface{}) error { + t := template.New("top") + t.Funcs(template.FuncMap{ + "trim": strings.TrimSpace, + "wrap": wrap, + }) + template.Must(t.Parse(templateText)) + return t.Execute(w, data) +} + +func ExecuteTemplateToString(templateText string, data interface{}) (string, error) { + b := bytes.Buffer{} + err := ExecuteTemplate(&b, templateText, data) + return b.String(), err +} diff --git a/vendor/k8s.io/client-go/pkg/util/umask.go b/vendor/k8s.io/client-go/pkg/util/umask.go new file mode 100644 index 000000000..35ccce50b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/umask.go @@ -0,0 +1,27 @@ +// +build !windows + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "syscall" +) + +func Umask(mask int) (old int, err error) { + return syscall.Umask(mask), nil +} diff --git a/vendor/k8s.io/client-go/pkg/util/umask_windows.go b/vendor/k8s.io/client-go/pkg/util/umask_windows.go new file mode 100644 index 000000000..7a1ba1538 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/umask_windows.go @@ -0,0 +1,27 @@ +// +build windows + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "errors" +) + +func Umask(mask int) (int, error) { + return 0, errors.New("platform and architecture is not supported") +} diff --git a/vendor/k8s.io/client-go/pkg/util/util.go b/vendor/k8s.io/client-go/pkg/util/util.go new file mode 100644 index 000000000..356b295a3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/util.go @@ -0,0 +1,131 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "os" + "reflect" + "regexp" +) + +// Takes a list of strings and compiles them into a list of regular expressions +func CompileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { + regexps := []*regexp.Regexp{} + for _, regexpStr := range regexpStrings { + r, err := regexp.Compile(regexpStr) + if err != nil { + return []*regexp.Regexp{}, err + } + regexps = append(regexps, r) + } + return regexps, nil +} + +// Detects if using systemd as the init system +// Please note that simply reading /proc/1/cmdline can be misleading because +// some installation of various init programs can automatically make /sbin/init +// a symlink or even a renamed version of their main program. +// TODO(dchen1107): realiably detects the init system using on the system: +// systemd, upstart, initd, etc. +func UsingSystemdInitSystem() bool { + if _, err := os.Stat("/run/systemd/system"); err == nil { + return true + } + + return false +} + +// Tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +func FileExists(filename string) (bool, error) { + if _, err := os.Stat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +// ReadDirNoStat returns a string of files/directories contained +// in dirname without calling lstat on them. +func ReadDirNoStat(dirname string) ([]string, error) { + if dirname == "" { + dirname = "." + } + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + return f.Readdirnames(-1) +} + +// IntPtr returns a pointer to an int +func IntPtr(i int) *int { + o := i + return &o +} + +// Int32Ptr returns a pointer to an int32 +func Int32Ptr(i int32) *int32 { + o := i + return &o +} + +// IntPtrDerefOr dereference the int ptr and returns it i not nil, +// else returns def. 
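
ParseImageName, vendored a few hunks above in pkg/util/parsers, splits an image reference into repository, tag and digest, falling back to DefaultImageTag when neither a tag nor a digest is present. A brief usage sketch (the image names are arbitrary examples):

package main

import (
	"fmt"

	"k8s.io/client-go/pkg/util/parsers"
)

func main() {
	// A tagged reference: the digest comes back empty.
	repo, tag, digest, err := parsers.ParseImageName("gcr.io/google_containers/pause:3.0")
	fmt.Println(repo, tag, digest, err)

	// Neither tag nor digest: the tag is defaulted to "latest".
	repo, tag, digest, err = parsers.ParseImageName("redis")
	fmt.Println(repo, tag, digest, err)
}
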
+func IntPtrDerefOr(ptr *int, def int) int { + if ptr != nil { + return *ptr + } + return def +} + +// Int32PtrDerefOr dereference the int32 ptr and returns it i not nil, +// else returns def. +func Int32PtrDerefOr(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go new file mode 100644 index 000000000..c377705fe --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -0,0 +1,59 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Base version information. +// +// This is the fallback data used when version information from git is not +// provided via go ldflags. It provides an approximation of the Kubernetes +// version for ad-hoc builds (e.g. `go build`) that cannot get the version +// information from git. +// +// If you are looking at these fields in the git tree, they look +// strange. They are modified on the fly by the build process. The +// in-tree values are dummy values used for "git archive", which also +// works for GitHub tar downloads. +// +// When releasing a new Kubernetes version, this file is updated by +// build/mark_new_version.sh to reflect the new version, and then a +// git annotated tag (using format vX.Y where X == Major version and Y +// == Minor version) is created to point to the commit that updates +// pkg/version/base.go +var ( + // TODO: Deprecate gitMajor and gitMinor, use only gitVersion + // instead. First step in deprecation, keep the fields but make + // them irrelevant. (Next we'll take it out, which may muck with + // scripts consuming the kubectl version output - but most of + // these should be looking at gitVersion already anyways.) + gitMajor string = "" // major version, always numeric + gitMinor string = "" // minor version, numeric possibly followed by "+" + + // semantic version, derived by build scripts (see + // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md + // for a detailed discussion of this field) + // + // TODO: This field is still called "gitVersion" for legacy + // reasons. For prerelease versions, the build metadata on the + // semantic version is a git hash, but the version itself is no + // longer the direct output of "git describe", but a slight + // translation to be semver compliant. + gitVersion string = "v0.0.0-master+$Format:%h$" + gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" + + buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') +) diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go new file mode 100644 index 000000000..4bf139ce8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version supplies version information collected at build time to +// kubernetes components. +package version // import "k8s.io/client-go/pkg/version" diff --git a/vendor/k8s.io/client-go/pkg/version/version.go b/vendor/k8s.io/client-go/pkg/version/version.go new file mode 100644 index 000000000..8c8350d13 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/version.go @@ -0,0 +1,42 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() apimachineryversion.Info { + // These variables typically come from -ldflags settings and in + // their absence fallback to the settings in pkg/version/base.go + return apimachineryversion.Info{ + Major: gitMajor, + Minor: gitMinor, + GitVersion: gitVersion, + GitCommit: gitCommit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go new file mode 100644 index 000000000..524e0d8eb --- /dev/null +++ b/vendor/k8s.io/client-go/rest/client.go @@ -0,0 +1,258 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "mime" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + // Environment variables: Note that the duration should be long enough that the backoff + // persists for some reasonable time (i.e. 120 seconds). The typical base might be "1". 
+ envBackoffBase = "KUBE_CLIENT_BACKOFF_BASE" + envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION" +) + +// Interface captures the set of operations for generically interacting with Kubernetes REST apis. +type Interface interface { + GetRateLimiter() flowcontrol.RateLimiter + Verb(verb string) *Request + Post() *Request + Put() *Request + Patch(pt types.PatchType) *Request + Get() *Request + Delete() *Request + APIVersion() schema.GroupVersion +} + +// RESTClient imposes common Kubernetes API conventions on a set of resource paths. +// The baseURL is expected to point to an HTTP or HTTPS path that is the parent +// of one or more resources. The server should return a decodable API resource +// object, or an api.Status object which contains information about the reason for +// any failure. +// +// Most consumers should use client.New() to get a Kubernetes API client. +type RESTClient struct { + // base is the root URL for all invocations of the client + base *url.URL + // versionedAPIPath is a path segment connecting the base URL to the resource root + versionedAPIPath string + + // contentConfig is the information used to communicate with the server. + contentConfig ContentConfig + + // serializers contain all serializers for underlying content type. + serializers Serializers + + // creates BackoffManager that is passed to requests. + createBackoffMgr func() BackoffManager + + // TODO extract this into a wrapper interface via the RESTClient interface in kubectl. + Throttle flowcontrol.RateLimiter + + // Set specific behavior of the client. If not set http.DefaultClient will be used. + Client *http.Client +} + +type Serializers struct { + Encoder runtime.Encoder + Decoder runtime.Decoder + StreamingSerializer runtime.Serializer + Framer runtime.Framer + RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error) +} + +// NewRESTClient creates a new RESTClient. This client performs generic REST functions +// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and +// decoding of responses from the server. +func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { + base := *baseURL + if !strings.HasSuffix(base.Path, "/") { + base.Path += "/" + } + base.RawQuery = "" + base.Fragment = "" + + if config.GroupVersion == nil { + config.GroupVersion = &schema.GroupVersion{} + } + if len(config.ContentType) == 0 { + config.ContentType = "application/json" + } + serializers, err := createSerializers(config) + if err != nil { + return nil, err + } + + var throttle flowcontrol.RateLimiter + if maxQPS > 0 && rateLimiter == nil { + throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst) + } else if rateLimiter != nil { + throttle = rateLimiter + } + return &RESTClient{ + base: &base, + versionedAPIPath: versionedAPIPath, + contentConfig: config, + serializers: *serializers, + createBackoffMgr: readExpBackoffConfig, + Throttle: throttle, + Client: client, + }, nil +} + +// GetRateLimiter returns rate limier for a given client, or nil if it's called on a nil client +func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { + if c == nil { + return nil + } + return c.Throttle +} + +// readExpBackoffConfig handles the internal logic of determining what the +// backoff policy is. By default if no information is available, NoBackoff. +// TODO Generalize this see #17727 . 
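Editor's illustrative sketch (not part of the vendored files): how the two backoff environment variables above are expected to be set by a caller. readExpBackoffConfig parses both values as whole seconds and falls back to NoBackoff when either is missing or malformed; the package and function names below are assumptions for illustration only.

package example

import "os"

// enableClientBackoff opts a process into client-side URL backoff. Both
// variables are read when a request's BackoffManager is created and are
// interpreted as seconds (base step and maximum duration respectively).
func enableClientBackoff() error {
	if err := os.Setenv("KUBE_CLIENT_BACKOFF_BASE", "1"); err != nil {
		return err
	}
	// Cap the backoff at 120 seconds, matching the guidance in the comment above.
	return os.Setenv("KUBE_CLIENT_BACKOFF_DURATION", "120")
}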
+func readExpBackoffConfig() BackoffManager { + backoffBase := os.Getenv(envBackoffBase) + backoffDuration := os.Getenv(envBackoffDuration) + + backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64) + backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64) + if errBase != nil || errDuration != nil { + return &NoBackoff{} + } + return &URLBackoff{ + Backoff: flowcontrol.NewBackOff( + time.Duration(backoffBaseInt)*time.Second, + time.Duration(backoffDurationInt)*time.Second)} +} + +// createSerializers creates all necessary serializers for given contentType. +// TODO: the negotiated serializer passed to this method should probably return +// serializers that control decoding and versioning without this package +// being aware of the types. Depends on whether RESTClient must deal with +// generic infrastructure. +func createSerializers(config ContentConfig) (*Serializers, error) { + mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes() + contentType := config.ContentType + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err) + } + info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, fmt.Errorf("no serializers registered for %s", contentType) + } + info = mediaTypes[0] + } + + internalGV := schema.GroupVersions{ + { + Group: config.GroupVersion.Group, + Version: runtime.APIVersionInternal, + }, + // always include the legacy group as a decoding target to handle non-error `Status` return types + { + Group: "", + Version: runtime.APIVersionInternal, + }, + } + + s := &Serializers{ + Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion), + Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), + + RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { + info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + return nil, fmt.Errorf("serializer for %s not registered", contentType) + } + return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil + }, + } + if info.StreamSerializer != nil { + s.StreamingSerializer = info.StreamSerializer.Serializer + s.Framer = info.StreamSerializer.Framer + } + + return s, nil +} + +// Verb begins a request with a verb (GET, POST, PUT, DELETE). +// +// Example usage of RESTClient's request building interface: +// c, err := NewRESTClient(...) +// if err != nil { ... } +// resp, err := c.Verb("GET"). +// Path("pods"). +// SelectorParam("labels", "area=staging"). +// Timeout(10*time.Second). +// Do() +// if err != nil { ... } +// list, ok := resp.(*api.PodList) +// +func (c *RESTClient) Verb(verb string) *Request { + backoff := c.createBackoffMgr() + + if c.Client == nil { + return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) + } + return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) +} + +// Post begins a POST request. Short for c.Verb("POST"). +func (c *RESTClient) Post() *Request { + return c.Verb("POST") +} + +// Put begins a PUT request. Short for c.Verb("PUT"). +func (c *RESTClient) Put() *Request { + return c.Verb("PUT") +} + +// Patch begins a PATCH request. Short for c.Verb("Patch"). 
+func (c *RESTClient) Patch(pt types.PatchType) *Request { + return c.Verb("PATCH").SetHeader("Content-Type", string(pt)) +} + +// Get begins a GET request. Short for c.Verb("GET"). +func (c *RESTClient) Get() *Request { + return c.Verb("GET") +} + +// Delete begins a DELETE request. Short for c.Verb("DELETE"). +func (c *RESTClient) Delete() *Request { + return c.Verb("DELETE") +} + +// APIVersion returns the APIVersion this RESTClient is expected to use. +func (c *RESTClient) APIVersion() schema.GroupVersion { + return *c.contentConfig.GroupVersion +} diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go new file mode 100644 index 000000000..b45114865 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/config.go @@ -0,0 +1,416 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + gruntime "runtime" + "strings" + "time" + + "github.com/golang/glog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/version" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + DefaultQPS float32 = 5.0 + DefaultBurst int = 10 +) + +// Config holds the common attributes that can be passed to a Kubernetes client on +// initialization. +type Config struct { + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. + // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + // APIPath is a sub-path that points to an API root. + APIPath string + // Prefix is the sub path of the server. If not specified, the client will set + // a default value. Use "/" to indicate the server root should be used + Prefix string + + // ContentConfig contains settings that affect how objects are transformed when + // sent to the server. + ContentConfig + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // Impersonate is the configuration that RESTClient will use for impersonation. + Impersonate ImpersonationConfig + + // Server requires plugin-specified authentication. + AuthProvider *clientcmdapi.AuthProviderConfig + + // Callback to persist config for AuthProvider. + AuthConfigPersister AuthProviderConfigPersister + + // TLSClientConfig contains settings to enable transport layer security + TLSClientConfig + + // UserAgent is an optional field that specifies the caller of this request. 
+ UserAgent string + + // Transport may be used for custom HTTP behavior. This attribute may not + // be specified with the TLS client certificate options. Use WrapTransport + // for most client level operations. + Transport http.RoundTripper + // WrapTransport will be invoked for custom HTTP behavior after the underlying + // transport is initialized (either the transport created from TLSClientConfig, + // Transport, or http.DefaultTransport). The config may layer other RoundTrippers + // on top of the returned RoundTripper. + WrapTransport func(rt http.RoundTripper) http.RoundTripper + + // QPS indicates the maximum QPS to the master from this client. + // If it's zero, the created RESTClient will use DefaultQPS: 5 + QPS float32 + + // Maximum burst for throttle. + // If it's zero, the created RESTClient will use DefaultBurst: 10. + Burst int + + // Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst + RateLimiter flowcontrol.RateLimiter + + // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. + Timeout time.Duration + + // Version forces a specific version to be used (if registered) + // Do we need this? + // Version string +} + +// ImpersonationConfig has all the available impersonation options +type ImpersonationConfig struct { + // UserName is the username to impersonate on each request. + UserName string + // Groups are the groups to impersonate on each request. + Groups []string + // Extra is a free-form field which can be used to link some authentication information + // to authorization information. This field allows you to impersonate it. + Extra map[string][]string +} + +// TLSClientConfig contains settings to enable transport layer security +type TLSClientConfig struct { + // Server should be accessed without verifying the TLS certificate. For testing only. + Insecure bool + // ServerName is passed to the server for SNI and is used in the client to check server + // ceritificates against. If ServerName is empty, the hostname used to contact the + // server is used. + ServerName string + + // Server requires TLS client certificate authentication + CertFile string + // Server requires TLS client certificate authentication + KeyFile string + // Trusted root certificates for server + CAFile string + + // CertData holds PEM-encoded bytes (typically read from a client certificate file). + // CertData takes precedence over CertFile + CertData []byte + // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). + // KeyData takes precedence over KeyFile + KeyData []byte + // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). + // CAData takes precedence over CAFile + CAData []byte +} + +type ContentConfig struct { + // AcceptContentTypes specifies the types the client will accept and is optional. + // If not set, ContentType will be used to define the Accept header + AcceptContentTypes string + // ContentType specifies the wire format used to communicate with the server. + // This value will be set as the Accept header on requests made to the server, and + // as the default content type on any object sent to the server. If not set, + // "application/json" is used. + ContentType string + // GroupVersion is the API version to talk to. Must be provided when initializing + // a RESTClient directly. When initializing a Client, will be set with the default + // code version. 
+ GroupVersion *schema.GroupVersion + // NegotiatedSerializer is used for obtaining encoders and decoders for multiple + // supported media types. + NegotiatedSerializer runtime.NegotiatedSerializer +} + +// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config +// object. Note that a RESTClient may require fields that are optional when initializing a Client. +// A RESTClient created by this method is generic - it expects to operate on an API that follows +// the Kubernetes conventions, but may not be the Kubernetes API. +func RESTClientFor(config *Config) (*RESTClient, error) { + if config.GroupVersion == nil { + return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") + } + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") + } + qps := config.QPS + if config.QPS == 0.0 { + qps = DefaultQPS + } + burst := config.Burst + if config.Burst == 0 { + burst = DefaultBurst + } + + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + if err != nil { + return nil, err + } + + transport, err := TransportFor(config) + if err != nil { + return nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + if config.Timeout > 0 { + httpClient.Timeout = config.Timeout + } + } + + return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient) +} + +// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows +// the config.Version to be empty. +func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NeogitatedSerializer is required when initializing a RESTClient") + } + + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + if err != nil { + return nil, err + } + + transport, err := TransportFor(config) + if err != nil { + return nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + if config.Timeout > 0 { + httpClient.Timeout = config.Timeout + } + } + + versionConfig := config.ContentConfig + if versionConfig.GroupVersion == nil { + v := metav1.SchemeGroupVersion + versionConfig.GroupVersion = &v + } + + return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) +} + +// SetKubernetesDefaults sets default values on the provided client config for accessing the +// Kubernetes API or returns an error if any of the defaults are impossible or invalid. +func SetKubernetesDefaults(config *Config) error { + if len(config.UserAgent) == 0 { + config.UserAgent = DefaultKubernetesUserAgent() + } + return nil +} + +// adjustCommit returns sufficient significant figures of the commit's git hash. +func adjustCommit(c string) string { + if len(c) == 0 { + return "unknown" + } + if len(c) > 7 { + return c[:7] + } + return c +} + +// adjustVersion strips "alpha", "beta", etc. from version in form +// major.minor.patch-[alpha|beta|etc]. +func adjustVersion(v string) string { + if len(v) == 0 { + return "unknown" + } + seg := strings.SplitN(v, "-", 2) + return seg[0] +} + +// adjustCommand returns the last component of the +// OS-specific command path for use in User-Agent. +func adjustCommand(p string) string { + // Unlikely, but better than returning "". 
+ if len(p) == 0 { + return "unknown" + } + return filepath.Base(p) +} + +// buildUserAgent builds a User-Agent string from given args. +func buildUserAgent(command, version, os, arch, commit string) string { + return fmt.Sprintf( + "%s/%s (%s/%s) kubernetes/%s", command, version, os, arch, commit) +} + +// DefaultKubernetesUserAgent returns a User-Agent string built from static global vars. +func DefaultKubernetesUserAgent() string { + return buildUserAgent( + adjustCommand(os.Args[0]), + adjustVersion(version.Get().GitVersion), + gruntime.GOOS, + gruntime.GOARCH, + adjustCommit(version.Get().GitCommit)) +} + +// InClusterConfig returns a config object which uses the service account +// kubernetes gives to pods. It's intended for clients that expect to be +// running inside a pod running on kubernetes. It will return an error if +// called from a process not running in a kubernetes environment. +func InClusterConfig() (*Config, error) { + host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") + if len(host) == 0 || len(port) == 0 { + return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") + } + + token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + v1.ServiceAccountTokenKey) + if err != nil { + return nil, err + } + tlsClientConfig := TLSClientConfig{} + rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + v1.ServiceAccountRootCAKey + if _, err := certutil.NewPool(rootCAFile); err != nil { + glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + } else { + tlsClientConfig.CAFile = rootCAFile + } + + return &Config{ + // TODO: switch to using cluster DNS. + Host: "https://" + net.JoinHostPort(host, port), + BearerToken: string(token), + TLSClientConfig: tlsClientConfig, + }, nil +} + +// IsConfigTransportTLS returns true if and only if the provided +// config will result in a protected connection to the server when it +// is passed to restclient.RESTClientFor(). Use to determine when to +// send credentials over the wire. +// +// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are +// still possible. +func IsConfigTransportTLS(config Config) bool { + baseURL, _, err := defaultServerUrlFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. 
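Editor's illustrative sketch (assumed caller code, not part of this vendor drop) for the in-cluster configuration path defined above, combined with the TLS check:

package example

import (
	"fmt"

	"k8s.io/client-go/rest"
)

// loadInClusterConfig builds a config from the pod's service account and
// reports whether requests built from it will be TLS-protected.
func loadInClusterConfig() (*rest.Config, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	fmt.Printf("host=%s tls=%v\n", cfg.Host, rest.IsConfigTransportTLS(*cfg))
	return cfg, nil
}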
+func LoadTLSFiles(c *Config) error { + var err error + c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) + if err != nil { + return err + } + + c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) + if err != nil { + return err + } + + c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +func AddUserAgent(config *Config, userAgent string) *Config { + fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent + config.UserAgent = fullUserAgent + return config +} + +// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed +func AnonymousClientConfig(config *Config) *Config { + // copy only known safe fields + return &Config{ + Host: config.Host, + APIPath: config.APIPath, + Prefix: config.Prefix, + ContentConfig: config.ContentConfig, + TLSClientConfig: TLSClientConfig{ + Insecure: config.Insecure, + ServerName: config.ServerName, + CAFile: config.TLSClientConfig.CAFile, + CAData: config.TLSClientConfig.CAData, + }, + RateLimiter: config.RateLimiter, + UserAgent: config.UserAgent, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + Timeout: config.Timeout, + } +} diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go new file mode 100644 index 000000000..cf8fbabfd --- /dev/null +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "net/http" + "sync" + + "github.com/golang/glog" + + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +type AuthProvider interface { + // WrapTransport allows the plugin to create a modified RoundTripper that + // attaches authorization headers (or other info) to requests. + WrapTransport(http.RoundTripper) http.RoundTripper + // Login allows the plugin to initialize its configuration. It must not + // require direct user interaction. + Login() error +} + +// Factory generates an AuthProvider plugin. +// clusterAddress is the address of the current cluster. +// config is the initial configuration for this plugin. +// persister allows the plugin to save updated configuration. +type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error) + +// AuthProviderConfigPersister allows a plugin to persist configuration info +// for just itself. 
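Editor's illustrative sketch of the auth provider plugin contract above, using the registration helper that follows; the provider name "example" and the no-op implementation are assumptions for illustration, not vendored code.

package example

import (
	"net/http"

	"k8s.io/client-go/rest"
)

// exampleProvider satisfies rest.AuthProvider without modifying requests.
type exampleProvider struct{}

func (exampleProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { return rt }
func (exampleProvider) Login() error                                         { return nil }

func init() {
	// Factory signature: cluster address, plugin config, and a persister for
	// updated configuration.
	_ = rest.RegisterAuthProviderPlugin("example", func(clusterAddress string,
		config map[string]string, persister rest.AuthProviderConfigPersister) (rest.AuthProvider, error) {
		return exampleProvider{}, nil
	})
}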
+type AuthProviderConfigPersister interface { + Persist(map[string]string) error +} + +// All registered auth provider plugins. +var pluginsLock sync.Mutex +var plugins = make(map[string]Factory) + +func RegisterAuthProviderPlugin(name string, plugin Factory) error { + pluginsLock.Lock() + defer pluginsLock.Unlock() + if _, found := plugins[name]; found { + return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) + } + glog.V(4).Infof("Registered Auth Provider Plugin %q", name) + plugins[name] = plugin + return nil +} + +func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) { + pluginsLock.Lock() + defer pluginsLock.Unlock() + p, ok := plugins[apc.Name] + if !ok { + return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name) + } + return p(clusterAddress, apc.Config, persister) +} diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go new file mode 100644 index 000000000..cfb4511ba --- /dev/null +++ b/vendor/k8s.io/client-go/rest/request.go @@ -0,0 +1,1250 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/pkg/api/v1" + restclientwatch "k8s.io/client-go/rest/watch" + "k8s.io/client-go/tools/metrics" + "k8s.io/client-go/util/flowcontrol" +) + +var ( + // specialParams lists parameters that are handled specially and which users of Request + // are therefore not allowed to set manually. + specialParams = sets.NewString("timeout") + + // longThrottleLatency defines threshold for logging requests. All requests being + // throttle for more than longThrottleLatency will be logged. + longThrottleLatency = 50 * time.Millisecond +) + +// HTTPClient is an interface for testing a request object. +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// ResponseWrapper is an interface for getting a response. +// The response may be either accessed as a raw data (the whole output is put into memory) or as a stream. +type ResponseWrapper interface { + DoRaw() ([]byte, error) + Stream() (io.ReadCloser, error) +} + +// RequestConstructionError is returned when there's an error assembling a request. +type RequestConstructionError struct { + Err error +} + +// Error returns a textual description of 'r'. 
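Editor's illustrative sketch (assumed test helper, not vendored code): since HTTPClient above exists so a Request can be tested, a minimal fake looks like this.

package example

import "net/http"

// fakeHTTPClient returns a canned response, satisfying the HTTPClient
// interface so a Request can be exercised without a live server.
type fakeHTTPClient struct {
	resp *http.Response
	err  error
}

func (f *fakeHTTPClient) Do(req *http.Request) (*http.Response, error) {
	return f.resp, f.err
}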
+func (r *RequestConstructionError) Error() string {
+	return fmt.Sprintf("request construction error: '%v'", r.Err)
+}
+
+// Request allows for building up a request to a server in a chained fashion.
+// Any errors are stored until the end of your call, so you only have to
+// check once.
+type Request struct {
+	// required
+	client HTTPClient
+	verb   string
+
+	baseURL     *url.URL
+	content     ContentConfig
+	serializers Serializers
+
+	// generic components accessible via method setters
+	pathPrefix string
+	subpath    string
+	params     url.Values
+	headers    http.Header
+
+	// structural elements of the request that are part of the Kubernetes API conventions
+	namespace    string
+	namespaceSet bool
+	resource     string
+	resourceName string
+	subresource  string
+	timeout      time.Duration
+
+	// output
+	err  error
+	body io.Reader
+
+	// This is only used for per-request timeouts, deadlines, and cancellations.
+	ctx context.Context
+
+	backoffMgr BackoffManager
+	throttle   flowcontrol.RateLimiter
+}
+
+// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
+func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter) *Request {
+	if backoff == nil {
+		glog.V(2).Infof("Not implementing request backoff strategy.")
+		backoff = &NoBackoff{}
+	}
+
+	pathPrefix := "/"
+	if baseURL != nil {
+		pathPrefix = path.Join(pathPrefix, baseURL.Path)
+	}
+	r := &Request{
+		client:      client,
+		verb:        verb,
+		baseURL:     baseURL,
+		pathPrefix:  path.Join(pathPrefix, versionedAPIPath),
+		content:     content,
+		serializers: serializers,
+		backoffMgr:  backoff,
+		throttle:    throttle,
+	}
+	switch {
+	case len(content.AcceptContentTypes) > 0:
+		r.SetHeader("Accept", content.AcceptContentTypes)
+	case len(content.ContentType) > 0:
+		r.SetHeader("Accept", content.ContentType+", */*")
+	}
+	return r
+}
+
+// Prefix adds segments to the relative beginning of the request path. These
+// items will be placed before the optional Namespace, Resource, or Name sections.
+// Setting AbsPath will clear any previously set Prefix segments.
+func (r *Request) Prefix(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...))
+	return r
+}
+
+// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional
+// Namespace, Resource, or Name sections.
+func (r *Request) Suffix(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.subpath = path.Join(r.subpath, path.Join(segments...))
+	return r
+}
+
+// Resource sets the resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Resource(resource string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if len(r.resource) != 0 {
+		r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(resource); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs)
+		return r
+	}
+	r.resource = resource
+	return r
+}
+
+// SubResource sets a sub-resource path which can be multiple segments after the resource
+// name but before the suffix.
+func (r *Request) SubResource(subresources ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	subresource := path.Join(subresources...)
+	if len(r.subresource) != 0 {
+		r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.subresource, subresource)
+		return r
+	}
+	for _, s := range subresources {
+		if msgs := IsValidPathSegmentName(s); len(msgs) != 0 {
+			r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs)
+			return r
+		}
+	}
+	r.subresource = subresource
+	return r
+}
+
+// Name sets the name of a resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Name(resourceName string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if len(resourceName) == 0 {
+		r.err = fmt.Errorf("resource name may not be empty")
+		return r
+	}
+	if len(r.resourceName) != 0 {
+		r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(resourceName); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs)
+		return r
+	}
+	r.resourceName = resourceName
+	return r
+}
+
+// Namespace applies the namespace scope to a request (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Namespace(namespace string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if r.namespaceSet {
+		r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(namespace); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs)
+		return r
+	}
+	r.namespaceSet = true
+	r.namespace = namespace
+	return r
+}
+
+// NamespaceIfScoped is a convenience function to set a namespace if scoped is true.
+func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request {
+	if scoped {
+		return r.Namespace(namespace)
+	}
+	return r
+}
+
+// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved
+// when a single segment is passed.
+func (r *Request) AbsPath(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...))
+	if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
+		// preserve any trailing slashes for legacy behavior
+		r.pathPrefix += "/"
+	}
+	return r
+}
+
+// RequestURI overwrites existing path and parameters with the value of the provided server relative
+// URI. Some parameters (those in specialParams) cannot be overwritten.
+func (r *Request) RequestURI(uri string) *Request {
+	if r.err != nil {
+		return r
+	}
+	locator, err := url.Parse(uri)
+	if err != nil {
+		r.err = err
+		return r
+	}
+	r.pathPrefix = locator.Path
+	if len(locator.Query()) > 0 {
+		if r.params == nil {
+			r.params = make(url.Values)
+		}
+		for k, v := range locator.Query() {
+			r.params[k] = v
+		}
+	}
+	return r
+}
+
+const (
+	// A constant that clients can use to refer in a field selector to the object name field.
+	// Will be automatically emitted as the correct name for the API version.
+ nodeUnschedulable = "spec.unschedulable" + objectNameField = "metadata.name" + podHost = "spec.nodeName" + podStatus = "status.phase" + secretType = "type" + + eventReason = "reason" + eventSource = "source" + eventType = "type" + eventInvolvedKind = "involvedObject.kind" + eventInvolvedNamespace = "involvedObject.namespace" + eventInvolvedName = "involvedObject.name" + eventInvolvedUID = "involvedObject.uid" + eventInvolvedAPIVersion = "involvedObject.apiVersion" + eventInvolvedResourceVersion = "involvedObject.resourceVersion" + eventInvolvedFieldPath = "involvedObject.fieldPath" +) + +type clientFieldNameToAPIVersionFieldName map[string]string + +func (c clientFieldNameToAPIVersionFieldName) filterField(field, value string) (newField, newValue string, err error) { + newFieldName, ok := c[field] + if !ok { + return "", "", fmt.Errorf("%v - %v - no field mapping defined", field, value) + } + return newFieldName, value, nil +} + +type resourceTypeToFieldMapping map[string]clientFieldNameToAPIVersionFieldName + +func (r resourceTypeToFieldMapping) filterField(resourceType, field, value string) (newField, newValue string, err error) { + fMapping, ok := r[resourceType] + if !ok { + return "", "", fmt.Errorf("%v - %v - %v - no field mapping defined", resourceType, field, value) + } + return fMapping.filterField(field, value) +} + +type versionToResourceToFieldMapping map[schema.GroupVersion]resourceTypeToFieldMapping + +// filterField transforms the given field/value selector for the given groupVersion and resource +func (v versionToResourceToFieldMapping) filterField(groupVersion *schema.GroupVersion, resourceType, field, value string) (newField, newValue string, err error) { + rMapping, ok := v[*groupVersion] + if !ok { + // no groupVersion overrides registered, default to identity mapping + return field, value, nil + } + newField, newValue, err = rMapping.filterField(resourceType, field, value) + if err != nil { + // no groupVersionResource overrides registered, default to identity mapping + return field, value, nil + } + return newField, newValue, nil +} + +var fieldMappings = versionToResourceToFieldMapping{ + v1.SchemeGroupVersion: resourceTypeToFieldMapping{ + "nodes": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + nodeUnschedulable: nodeUnschedulable, + }, + "pods": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + podHost: podHost, + podStatus: podStatus, + }, + "secrets": clientFieldNameToAPIVersionFieldName{ + secretType: secretType, + }, + "serviceAccounts": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + }, + "endpoints": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + }, + "events": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + eventReason: eventReason, + eventSource: eventSource, + eventType: eventType, + eventInvolvedKind: eventInvolvedKind, + eventInvolvedNamespace: eventInvolvedNamespace, + eventInvolvedName: eventInvolvedName, + eventInvolvedUID: eventInvolvedUID, + eventInvolvedAPIVersion: eventInvolvedAPIVersion, + eventInvolvedResourceVersion: eventInvolvedResourceVersion, + eventInvolvedFieldPath: eventInvolvedFieldPath, + }, + }, +} + +// FieldsSelectorParam adds the given selector as a query parameter with the name paramName. 
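Editor's illustrative sketch of the mapping table above (assumed caller-side code, names are illustrative): for the "pods" resource the client-side field "spec.nodeName" passes through unchanged, so a node-scoped list request can be built with the chained interface.

package example

import (
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/rest"
)

// podsOnNode builds a GET for pods whose spec.nodeName matches node; the
// selector is translated through fieldMappings by FieldsSelectorParam.
func podsOnNode(c *rest.RESTClient, namespace, node string) rest.Result {
	return c.Get().
		Namespace(namespace).
		Resource("pods").
		FieldsSelectorParam(fields.OneTermEqualSelector("spec.nodeName", node)).
		Do()
}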
+func (r *Request) FieldsSelectorParam(s fields.Selector) *Request { + if r.err != nil { + return r + } + if s == nil { + return r + } + if s.Empty() { + return r + } + s2, err := s.Transform(func(field, value string) (newField, newValue string, err error) { + return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value) + }) + if err != nil { + r.err = err + return r + } + return r.setParam(metav1.FieldSelectorQueryParam(r.content.GroupVersion.String()), s2.String()) +} + +// LabelsSelectorParam adds the given selector as a query parameter +func (r *Request) LabelsSelectorParam(s labels.Selector) *Request { + if r.err != nil { + return r + } + if s == nil { + return r + } + if s.Empty() { + return r + } + return r.setParam(metav1.LabelSelectorQueryParam(r.content.GroupVersion.String()), s.String()) +} + +// UintParam creates a query parameter with the given value. +func (r *Request) UintParam(paramName string, u uint64) *Request { + if r.err != nil { + return r + } + return r.setParam(paramName, strconv.FormatUint(u, 10)) +} + +// Param creates a query parameter with the given string value. +func (r *Request) Param(paramName, s string) *Request { + if r.err != nil { + return r + } + return r.setParam(paramName, s) +} + +// VersionedParams will take the provided object, serialize it to a map[string][]string using the +// implicit RESTClient API version and the default parameter codec, and then add those as parameters +// to the request. Use this to provide versioned query parameters from client libraries. +func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request { + if r.err != nil { + return r + } + params, err := codec.EncodeParameters(obj, *r.content.GroupVersion) + if err != nil { + r.err = err + return r + } + for k, v := range params { + for _, value := range v { + // TODO: Move it to setParam method, once we get rid of + // FieldSelectorParam & LabelSelectorParam methods. + if k == metav1.LabelSelectorQueryParam(r.content.GroupVersion.String()) && value == "" { + // Don't set an empty selector for backward compatibility. + // Since there is no way to get the difference between empty + // and unspecified string, we don't set it to avoid having + // labelSelector= param in every request. + continue + } + if k == metav1.FieldSelectorQueryParam(r.content.GroupVersion.String()) { + if len(value) == 0 { + // Don't set an empty selector for backward compatibility. + // Since there is no way to get the difference between empty + // and unspecified string, we don't set it to avoid having + // fieldSelector= param in every request. + continue + } + // TODO: Filtering should be handled somewhere else. 
+ selector, err := fields.ParseSelector(value) + if err != nil { + r.err = fmt.Errorf("unparsable field selector: %v", err) + return r + } + filteredSelector, err := selector.Transform( + func(field, value string) (newField, newValue string, err error) { + return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value) + }) + if err != nil { + r.err = fmt.Errorf("untransformable field selector: %v", err) + return r + } + value = filteredSelector.String() + } + + r.setParam(k, value) + } + } + return r +} + +func (r *Request) setParam(paramName, value string) *Request { + if specialParams.Has(paramName) { + r.err = fmt.Errorf("must set %v through the corresponding function, not directly.", paramName) + return r + } + if r.params == nil { + r.params = make(url.Values) + } + r.params[paramName] = append(r.params[paramName], value) + return r +} + +func (r *Request) SetHeader(key, value string) *Request { + if r.headers == nil { + r.headers = http.Header{} + } + r.headers.Set(key, value) + return r +} + +// Timeout makes the request use the given duration as a timeout. Sets the "timeout" +// parameter. +func (r *Request) Timeout(d time.Duration) *Request { + if r.err != nil { + return r + } + r.timeout = d + return r +} + +// Body makes the request use obj as the body. Optional. +// If obj is a string, try to read a file of that name. +// If obj is a []byte, send it directly. +// If obj is an io.Reader, use it directly. +// If obj is a runtime.Object, marshal it correctly, and set Content-Type header. +// If obj is a runtime.Object and nil, do nothing. +// Otherwise, set an error. +func (r *Request) Body(obj interface{}) *Request { + if r.err != nil { + return r + } + switch t := obj.(type) { + case string: + data, err := ioutil.ReadFile(t) + if err != nil { + r.err = err + return r + } + glogBody("Request Body", data) + r.body = bytes.NewReader(data) + case []byte: + glogBody("Request Body", t) + r.body = bytes.NewReader(t) + case io.Reader: + r.body = t + case runtime.Object: + // callers may pass typed interface pointers, therefore we must check nil with reflection + if reflect.ValueOf(t).IsNil() { + return r + } + data, err := runtime.Encode(r.serializers.Encoder, t) + if err != nil { + r.err = err + return r + } + glogBody("Request Body", data) + r.body = bytes.NewReader(data) + r.SetHeader("Content-Type", r.content.ContentType) + default: + r.err = fmt.Errorf("unknown type used for body: %+v", obj) + } + return r +} + +// Context adds a context to the request. Contexts are only used for +// timeouts, deadlines, and cancellations. +func (r *Request) Context(ctx context.Context) *Request { + r.ctx = ctx + return r +} + +// URL returns the current working URL. +func (r *Request) URL() *url.URL { + p := r.pathPrefix + if r.namespaceSet && len(r.namespace) > 0 { + p = path.Join(p, "namespaces", r.namespace) + } + if len(r.resource) != 0 { + p = path.Join(p, strings.ToLower(r.resource)) + } + // Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compatibility if nothing was changed + if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 { + p = path.Join(p, r.resourceName, r.subresource, r.subpath) + } + + finalURL := &url.URL{} + if r.baseURL != nil { + *finalURL = *r.baseURL + } + finalURL.Path = p + + query := url.Values{} + for key, values := range r.params { + for _, value := range values { + query.Add(key, value) + } + } + + // timeout is handled specially here. 
+ if r.timeout != 0 { + query.Set("timeout", r.timeout.String()) + } + finalURL.RawQuery = query.Encode() + return finalURL +} + +// finalURLTemplate is similar to URL(), but will make all specific parameter values equal +// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query +// parameters will be reset. This creates a copy of the request so as not to change the +// underyling object. This means some useful request info (like the types of field +// selectors in use) will be lost. +// TODO: preserve field selector keys +func (r Request) finalURLTemplate() url.URL { + if len(r.resourceName) != 0 { + r.resourceName = "{name}" + } + if r.namespaceSet && len(r.namespace) != 0 { + r.namespace = "{namespace}" + } + newParams := url.Values{} + v := []string{"{value}"} + for k := range r.params { + newParams[k] = v + } + r.params = newParams + url := r.URL() + return *url +} + +func (r *Request) tryThrottle() { + now := time.Now() + if r.throttle != nil { + r.throttle.Accept() + } + if latency := time.Since(now); latency > longThrottleLatency { + glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) + } +} + +// Watch attempts to begin watching the requested location. +// Returns a watch.Interface, or an error. +func (r *Request) Watch() (watch.Interface, error) { + // We specifically don't want to rate limit watches, so we + // don't use r.throttle here. + if r.err != nil { + return nil, r.err + } + if r.serializers.Framer == nil { + return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType) + } + + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, r.body) + if err != nil { + return nil, err + } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } + req.Header = r.headers + client := r.client + if client == nil { + client = http.DefaultClient + } + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if r.baseURL != nil { + if err != nil { + r.backoffMgr.UpdateBackoff(r.baseURL, err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode) + } + } + if err != nil { + // The watch stream mechanism handles many common partial data errors, so closed + // connections can be retried in many cases. + if net.IsProbableEOF(err) { + return watch.NewEmptyWatch(), nil + } + return nil, err + } + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() + if result := r.transformResponse(resp, req); result.err != nil { + return nil, result.err + } + return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode) + } + framer := r.serializers.Framer.NewFrameReader(resp.Body) + decoder := streaming.NewDecoder(framer, r.serializers.StreamingSerializer) + return watch.NewStreamWatcher(restclientwatch.NewDecoder(decoder, r.serializers.Decoder)), nil +} + +// updateURLMetrics is a convenience function for pushing metrics. +// It also handles corner cases for incomplete/invalid request data. +func updateURLMetrics(req *Request, resp *http.Response, err error) { + url := "none" + if req.baseURL != nil { + url = req.baseURL.Host + } + + // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric + // system so we just report them as ``. 
+ if err != nil { + metrics.RequestResult.Increment("", req.verb, url) + } else { + //Metrics for failure codes + metrics.RequestResult.Increment(strconv.Itoa(resp.StatusCode), req.verb, url) + } +} + +// Stream formats and executes the request, and offers streaming of the response. +// Returns io.ReadCloser which could be used for streaming of the response, or an error +// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object. +// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response. +func (r *Request) Stream() (io.ReadCloser, error) { + if r.err != nil { + return nil, r.err + } + + r.tryThrottle() + + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, nil) + if err != nil { + return nil, err + } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } + req.Header = r.headers + client := r.client + if client == nil { + client = http.DefaultClient + } + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if r.baseURL != nil { + if err != nil { + r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + } + } + if err != nil { + return nil, err + } + + switch { + case (resp.StatusCode >= 200) && (resp.StatusCode < 300): + return resp.Body, nil + + default: + // ensure we close the body before returning the error + defer resp.Body.Close() + + result := r.transformResponse(resp, req) + err := result.Error() + if err == nil { + err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body)) + } + return nil, err + } +} + +// request connects to the server and invokes the provided function when a server response is +// received. It handles retry behavior and up front validation of requests. It will invoke +// fn at most once. It will return an error if a problem occurred prior to connecting to the +// server - the provided function is responsible for handling server errors. +func (r *Request) request(fn func(*http.Request, *http.Response)) error { + //Metrics for total request latency + start := time.Now() + defer func() { + metrics.RequestLatency.Observe(r.verb, r.finalURLTemplate(), time.Since(start)) + }() + + if r.err != nil { + glog.V(4).Infof("Error in request: %v", r.err) + return r.err + } + + // TODO: added to catch programmer errors (invoking operations with an object with an empty namespace) + if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 { + return fmt.Errorf("an empty namespace may not be set when a resource name is provided") + } + if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 { + return fmt.Errorf("an empty namespace may not be set during creation") + } + + client := r.client + if client == nil { + client = http.DefaultClient + } + + // Right now we make about ten retry attempts if we get a Retry-After response. + // TODO: Change to a timeout based approach. + maxRetries := 10 + retries := 0 + for { + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, r.body) + if err != nil { + return err + } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } + req.Header = r.headers + + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + if retries > 0 { + // We are retrying the request that we already send to apiserver + // at least once before. 
+ // This request should also be throttled with the client-internal throttler. + r.tryThrottle() + } + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if err != nil { + r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + } + if err != nil { + // "Connection reset by peer" is usually a transient error. + // Thus in case of "GET" operations, we simply retry it. + // We are not automatically retrying "write" operations, as + // they are not idempotent. + if !net.IsConnectionReset(err) || r.verb != "GET" { + return err + } + // For the purpose of retry, we set the artificial "retry-after" response. + // TODO: Should we clean the original response if it exists? + resp = &http.Response{ + StatusCode: http.StatusInternalServerError, + Header: http.Header{"Retry-After": []string{"1"}}, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + + done := func() bool { + // Ensure the response body is fully read and closed + // before we reconnect, so that we reuse the same TCP + // connection. + defer func() { + const maxBodySlurpSize = 2 << 10 + if resp.ContentLength <= maxBodySlurpSize { + io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize}) + } + resp.Body.Close() + }() + + retries++ + if seconds, wait := checkWait(resp); wait && retries < maxRetries { + if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { + _, err := seeker.Seek(0, 0) + if err != nil { + glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + fn(req, resp) + return true + } + } + + glog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", seconds, retries, url) + r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) + return false + } + fn(req, resp) + return true + }() + if done { + return nil + } + } +} + +// Do formats and executes the request. Returns a Result object for easy response +// processing. +// +// Error type: +// * If the request can't be constructed, or an error happened earlier while building its +// arguments: *RequestConstructionError +// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError +// * http.Client.Do errors are returned directly. +func (r *Request) Do() Result { + r.tryThrottle() + + var result Result + err := r.request(func(req *http.Request, resp *http.Response) { + result = r.transformResponse(resp, req) + }) + if err != nil { + return Result{err: err} + } + return result +} + +// DoRaw executes the request but does not process the response body. 
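Do (above) and DoRaw (continued just below) are the entry points most callers use, so the throttling and retry behavior in request() is normally invisible at the call site. A minimal caller-side sketch, assuming an already-configured *rest.RESTClient named c and purely illustrative resource names:

```go
package example

import (
	"k8s.io/client-go/rest"
)

// getPodRaw is a hypothetical helper: it issues a GET for one namespaced
// resource through the request builder and returns the undecoded body.
// "pods", "default" and "example" are placeholder values; building the
// RESTClient itself (host, TLS, codecs) is assumed to happen elsewhere.
func getPodRaw(c *rest.RESTClient) ([]byte, error) {
	return c.Get().
		Namespace("default").
		Resource("pods").
		Name("example").
		Do().  // runs tryThrottle() and the retry loop in request() above
		Raw()  // returns the body plus any error built by transformResponse
}
```

DoRaw(), defined next, offers a similar raw-bytes path without going through transformResponse.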
+func (r *Request) DoRaw() ([]byte, error) { + r.tryThrottle() + + var result Result + err := r.request(func(req *http.Request, resp *http.Response) { + result.body, result.err = ioutil.ReadAll(resp.Body) + glogBody("Response Body", result.body) + if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent { + result.err = r.transformUnstructuredResponseError(resp, req, result.body) + } + }) + if err != nil { + return nil, err + } + return result.body, result.err +} + +// transformResponse converts an API response into a structured API object +func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result { + var body []byte + if resp.Body != nil { + if data, err := ioutil.ReadAll(resp.Body); err == nil { + body = data + } + } + + glogBody("Response Body", body) + + // verify the content type is accurate + contentType := resp.Header.Get("Content-Type") + decoder := r.serializers.Decoder + if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) { + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + return Result{err: errors.NewInternalError(err)} + } + decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params) + if err != nil { + // if we fail to negotiate a decoder, treat this as an unstructured error + switch { + case resp.StatusCode == http.StatusSwitchingProtocols: + // no-op, we've been upgraded + case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent: + return Result{err: r.transformUnstructuredResponseError(resp, req, body)} + } + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + } + } + } + + switch { + case resp.StatusCode == http.StatusSwitchingProtocols: + // no-op, we've been upgraded + case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent: + // calculate an unstructured error from the response which the Result object may use if the caller + // did not return a structured error. + retryAfter, _ := retryAfterSeconds(resp) + err := r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter) + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + decoder: decoder, + err: err, + } + } + + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + decoder: decoder, + } +} + +// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against +// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine +// whether the body is printable. +func glogBody(prefix string, body []byte) { + if glog.V(8) { + if bytes.IndexFunc(body, func(r rune) bool { + return r < 0x0a + }) != -1 { + glog.Infof("%s:\n%s", prefix, hex.Dump(body)) + } else { + glog.Infof("%s: %s", prefix, string(body)) + } + } +} + +// maxUnstructuredResponseTextBytes is an upper bound on how much output to include in the unstructured error. +const maxUnstructuredResponseTextBytes = 2048 + +// transformUnstructuredResponseError handles an error from the server that is not in a structured form. +// It is expected to transform any response that is not recognizable as a clear server sent error from the +// K8S API using the information provided with the request. 
In practice, HTTP proxies and client libraries +// introduce a level of uncertainty to the responses returned by servers that in common use result in +// unexpected responses. The rough structure is: +// +// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes +// - this is the happy path +// - when you get this output, trust what the server sends +// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to +// generate a reasonable facsimile of the original failure. +// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above +// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error +// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected +// initial contact, the presence of mismatched body contents from posted content types +// - Give these a separate distinct error type and capture as much as possible of the original message +// +// TODO: introduce transformation of generic http.Client.Do() errors that separates 4. +func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error { + if body == nil && resp.Body != nil { + if data, err := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil { + body = data + } + } + retryAfter, _ := retryAfterSeconds(resp) + return r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter) +} + +// newUnstructuredResponseError instantiates the appropriate generic error for the provided input. It also logs the body. +func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, statusCode int, method string, retryAfter int) error { + // cap the amount of output we create + if len(body) > maxUnstructuredResponseTextBytes { + body = body[:maxUnstructuredResponseTextBytes] + } + + message := "unknown" + if isTextResponse { + message = strings.TrimSpace(string(body)) + } + var groupResource schema.GroupResource + if len(r.resource) > 0 { + groupResource.Group = r.content.GroupVersion.Group + groupResource.Resource = r.resource + } + return errors.NewGenericServerResponse( + statusCode, + method, + groupResource, + r.resourceName, + message, + retryAfter, + true, + ) +} + +// isTextResponse returns true if the response appears to be a textual media type. +func isTextResponse(resp *http.Response) bool { + contentType := resp.Header.Get("Content-Type") + if len(contentType) == 0 { + return true + } + media, _, err := mime.ParseMediaType(contentType) + if err != nil { + return false + } + return strings.HasPrefix(media, "text/") +} + +// checkWait returns true along with a number of seconds if the server instructed us to wait +// before retrying. +func checkWait(resp *http.Response) (int, bool) { + switch r := resp.StatusCode; { + // any 500 error code and 429 can trigger a wait + case r == errors.StatusTooManyRequests, r >= 500: + default: + return 0, false + } + i, ok := retryAfterSeconds(resp) + return i, ok +} + +// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if +// the header was missing or not a valid number. 
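checkWait and retryAfterSeconds (continued below) only honor integer Retry-After values, and only on 429 or 5xx responses. Because both helpers are unexported, the sketch below re-implements the same rule as a standalone program to make the behavior concrete; it is an illustration, not part of the vendored package:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// retryAfter mirrors the unexported checkWait/retryAfterSeconds logic above:
// only 429 and 5xx responses qualify, and Retry-After must be a plain integer
// number of seconds (HTTP-date forms are ignored).
func retryAfter(resp *http.Response) (seconds int, wait bool) {
	if resp.StatusCode != http.StatusTooManyRequests && resp.StatusCode < 500 {
		return 0, false
	}
	if h := resp.Header.Get("Retry-After"); len(h) > 0 {
		if i, err := strconv.Atoi(h); err == nil {
			return i, true
		}
	}
	return 0, false
}

func main() {
	resp := &http.Response{
		StatusCode: http.StatusTooManyRequests,
		Header:     http.Header{"Retry-After": []string{"3"}},
	}
	fmt.Println(retryAfter(resp)) // 3 true
}
```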
+func retryAfterSeconds(resp *http.Response) (int, bool) { + if h := resp.Header.Get("Retry-After"); len(h) > 0 { + if i, err := strconv.Atoi(h); err == nil { + return i, true + } + } + return 0, false +} + +// Result contains the result of calling Request.Do(). +type Result struct { + body []byte + contentType string + err error + statusCode int + + decoder runtime.Decoder +} + +// Raw returns the raw result. +func (r Result) Raw() ([]byte, error) { + return r.body, r.err +} + +// Get returns the result as an object, which means it passes through the decoder. +// If the returned object is of type Status and has .Status != StatusSuccess, the +// additional information in Status will be used to enrich the error. +func (r Result) Get() (runtime.Object, error) { + if r.err != nil { + // Check whether the result has a Status object in the body and prefer that. + return nil, r.Error() + } + if r.decoder == nil { + return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType) + } + + // decode, but if the result is Status return that as an error instead. + out, _, err := r.decoder.Decode(r.body, nil, nil) + if err != nil { + return nil, err + } + switch t := out.(type) { + case *metav1.Status: + // any status besides StatusSuccess is considered an error. + if t.Status != metav1.StatusSuccess { + return nil, errors.FromObject(t) + } + } + return out, nil +} + +// StatusCode returns the HTTP status code of the request. (Only valid if no +// error was returned.) +func (r Result) StatusCode(statusCode *int) Result { + *statusCode = r.statusCode + return r +} + +// Into stores the result into obj, if possible. If obj is nil it is ignored. +// If the returned object is of type Status and has .Status != StatusSuccess, the +// additional information in Status will be used to enrich the error. +func (r Result) Into(obj runtime.Object) error { + if r.err != nil { + // Check whether the result has a Status object in the body and prefer that. + return r.Error() + } + if r.decoder == nil { + return fmt.Errorf("serializer for %s doesn't exist", r.contentType) + } + if len(r.body) == 0 { + return fmt.Errorf("0-length response") + } + + out, _, err := r.decoder.Decode(r.body, nil, obj) + if err != nil || out == obj { + return err + } + // if a different object is returned, see if it is Status and avoid double decoding + // the object. + switch t := out.(type) { + case *metav1.Status: + // any status besides StatusSuccess is considered an error. + if t.Status != metav1.StatusSuccess { + return errors.FromObject(t) + } + } + return nil +} + +// WasCreated updates the provided bool pointer to whether the server returned +// 201 created or a different response. +func (r Result) WasCreated(wasCreated *bool) Result { + *wasCreated = r.statusCode == http.StatusCreated + return r +} + +// Error returns the error executing the request, nil if no error occurred. +// If the returned object is of type Status and has Status != StatusSuccess, the +// additional information in Status will be used to enrich the error. +// See the Request.Do() comment for what errors you might get. +func (r Result) Error() error { + // if we have received an unexpected server error, and we have a body and decoder, we can try to extract + // a Status object. 
+ if r.err == nil || !errors.IsUnexpectedServerError(r.err) || len(r.body) == 0 || r.decoder == nil { + return r.err + } + + // attempt to convert the body into a Status object + // to be backwards compatible with old servers that do not return a version, default to "v1" + out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) + if err != nil { + glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) + return r.err + } + switch t := out.(type) { + case *metav1.Status: + // because we default the kind, we *must* check for StatusFailure + if t.Status == metav1.StatusFailure { + return errors.FromObject(t) + } + } + return r.err +} + +// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store) +var NameMayNotBe = []string{".", ".."} + +// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store) +var NameMayNotContain = []string{"/", "%"} + +// IsValidPathSegmentName validates the name can be safely encoded as a path segment +func IsValidPathSegmentName(name string) []string { + for _, illegalName := range NameMayNotBe { + if name == illegalName { + return []string{fmt.Sprintf(`may not be '%s'`, illegalName)} + } + } + + var errors []string + for _, illegalContent := range NameMayNotContain { + if strings.Contains(name, illegalContent) { + errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent)) + } + } + + return errors +} + +// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment +// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid +func IsValidPathSegmentPrefix(name string) []string { + var errors []string + for _, illegalContent := range NameMayNotContain { + if strings.Contains(name, illegalContent) { + errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent)) + } + } + + return errors +} + +// ValidatePathSegmentName validates the name can be safely encoded as a path segment +func ValidatePathSegmentName(name string, prefix bool) []string { + if prefix { + return IsValidPathSegmentPrefix(name) + } else { + return IsValidPathSegmentName(name) + } +} diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go new file mode 100644 index 000000000..ba43752bc --- /dev/null +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -0,0 +1,99 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "crypto/tls" + "net/http" + + "k8s.io/client-go/transport" +) + +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. 
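TLSConfigFor (just below) and TransportFor are the low-level helpers for callers that want a plain *http.Client carrying the Config's TLS and auth settings rather than a full RESTClient. A hedged sketch with placeholder host, token, and CA path:

```go
package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/rest"
)

func main() {
	// All values here are placeholders; a real config usually comes from a
	// kubeconfig file or the in-cluster environment.
	cfg := &rest.Config{
		Host:            "https://127.0.0.1:6443",
		BearerToken:     "REPLACE_ME",
		TLSClientConfig: rest.TLSClientConfig{CAFile: "/etc/kubernetes/ca.crt"},
	}

	rt, err := rest.TransportFor(cfg) // wraps the TLS and bearer-token round trippers
	if err != nil {
		panic(err)
	}
	client := &http.Client{Transport: rt}

	resp, err := client.Get(cfg.Host + "/version")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```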
+func TLSConfigFor(config *Config) (*tls.Config, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.TLSConfigFor(cfg) +} + +// TransportFor returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultTransport if no special case behavior is needed. +func TransportFor(config *Config) (http.RoundTripper, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.New(cfg) +} + +// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the +// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack +// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use +// the higher level TransportFor or RESTClientFor methods. +func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.HTTPWrappersForConfig(cfg, rt) +} + +// TransportConfig converts a client config to an appropriate transport config. +func (c *Config) TransportConfig() (*transport.Config, error) { + wt := c.WrapTransport + if c.AuthProvider != nil { + provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) + if err != nil { + return nil, err + } + if wt != nil { + previousWT := wt + wt = func(rt http.RoundTripper) http.RoundTripper { + return provider.WrapTransport(previousWT(rt)) + } + } else { + wt = provider.WrapTransport + } + } + return &transport.Config{ + UserAgent: c.UserAgent, + Transport: c.Transport, + WrapTransport: wt, + TLS: transport.TLSConfig{ + Insecure: c.Insecure, + ServerName: c.ServerName, + CAFile: c.CAFile, + CAData: c.CAData, + CertFile: c.CertFile, + CertData: c.CertData, + KeyFile: c.KeyFile, + KeyData: c.KeyData, + }, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + Impersonate: transport.ImpersonationConfig{ + UserName: c.Impersonate.UserName, + Groups: c.Impersonate.Groups, + Extra: c.Impersonate.Extra, + }, + }, nil +} diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go new file mode 100644 index 000000000..14f94650a --- /dev/null +++ b/vendor/k8s.io/client-go/rest/url_utils.go @@ -0,0 +1,90 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "net/url" + "path" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// DefaultServerURL converts a host, host:port, or URL string to the default base server API path +// to use with a Client at a given API version following the standard conventions for a +// Kubernetes API. 
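For a bare host:port input, DefaultServerURL (below) picks the scheme from defaultTLS and returns the versioned API path separately from the base URL. A small sketch of the expected behavior for illustrative inputs:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
)

func main() {
	// A bare host:port gets an http:// scheme because defaultTLS is false,
	// and the group-less core API resolves to /api/v1.
	base, versionedPath, err := rest.DefaultServerURL(
		"localhost:8080", "/api", schema.GroupVersion{Version: "v1"}, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(base.String(), versionedPath) // http://localhost:8080 /api/v1
}
```

defaultServerUrlFor, shown after it, feeds this helper from a Config's Host, APIPath, and GroupVersion.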
+func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, defaultTLS bool) (*url.URL, string, error) { + if host == "" { + return nil, "", fmt.Errorf("host must be a URL or a host:port pair") + } + base := host + hostURL, err := url.Parse(base) + if err != nil || hostURL.Scheme == "" || hostURL.Host == "" { + scheme := "http://" + if defaultTLS { + scheme = "https://" + } + hostURL, err = url.Parse(scheme + base) + if err != nil { + return nil, "", err + } + if hostURL.Path != "" && hostURL.Path != "/" { + return nil, "", fmt.Errorf("host must be a URL or a host:port pair: %q", base) + } + } + + // hostURL.Path is optional; a non-empty Path is treated as a prefix that is to be applied to + // all URIs used to access the host. this is useful when there's a proxy in front of the + // apiserver that has relocated the apiserver endpoints, forwarding all requests from, for + // example, /a/b/c to the apiserver. in this case the Path should be /a/b/c. + // + // if running without a frontend proxy (that changes the location of the apiserver), then + // hostURL.Path should be blank. + // + // versionedAPIPath, a path relative to baseURL.Path, points to a versioned API base + versionedAPIPath := path.Join("/", apiPath) + + // Add the version to the end of the path + if len(groupVersion.Group) > 0 { + versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Group, groupVersion.Version) + + } else { + versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Version) + + } + + return hostURL, versionedAPIPath, nil +} + +// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It +// requires Host and Version to be set prior to being called. +func defaultServerUrlFor(config *Config) (*url.URL, string, error) { + // TODO: move the default to secure when the apiserver supports TLS by default + // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." + hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0 + hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0 + defaultTLS := hasCA || hasCert || config.Insecure + host := config.Host + if host == "" { + host = "localhost" + } + + if config.GroupVersion != nil { + return DefaultServerURL(host, config.APIPath, *config.GroupVersion, defaultTLS) + } + return DefaultServerURL(host, config.APIPath, schema.GroupVersion{}, defaultTLS) +} diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go new file mode 100644 index 000000000..eff848abc --- /dev/null +++ b/vendor/k8s.io/client-go/rest/urlbackoff.go @@ -0,0 +1,107 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "net/url" + "time" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/flowcontrol" +) + +// Set of resp. Codes that we backoff for. +// In general these should be errors that indicate a server is overloaded. 
+// These shouldn't be configured by any user, we set them based on conventions +// described in +var serverIsOverloadedSet = sets.NewInt(429) +var maxResponseCode = 499 + +type BackoffManager interface { + UpdateBackoff(actualUrl *url.URL, err error, responseCode int) + CalculateBackoff(actualUrl *url.URL) time.Duration + Sleep(d time.Duration) +} + +// URLBackoff struct implements the semantics on top of Backoff which +// we need for URL specific exponential backoff. +type URLBackoff struct { + // Uses backoff as underlying implementation. + Backoff *flowcontrol.Backoff +} + +// NoBackoff is a stub implementation, can be used for mocking or else as a default. +type NoBackoff struct { +} + +func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) { + // do nothing. +} + +func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration { + return 0 * time.Second +} + +func (n *NoBackoff) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Disable makes the backoff trivial, i.e., sets it to zero. This might be used +// by tests which want to run 1000s of mock requests without slowing down. +func (b *URLBackoff) Disable() { + glog.V(4).Infof("Disabling backoff strategy") + b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) +} + +// baseUrlKey returns the key which urls will be mapped to. +// For example, 127.0.0.1:8080/api/v2/abcde -> 127.0.0.1:8080. +func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string { + // Simple implementation for now, just the host. + // We may backoff specific paths (i.e. "pods") differentially + // in the future. + host, err := url.Parse(rawurl.String()) + if err != nil { + glog.V(4).Infof("Error extracting url: %v", rawurl) + panic("bad url!") + } + return host.Host +} + +// UpdateBackoff updates backoff metadata +func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) { + // range for retry counts that we store is [0,13] + if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) { + b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now()) + return + } else if responseCode >= 300 || err != nil { + glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) + } + + //If we got this far, there is no backoff required for this URL anymore. + b.Backoff.Reset(b.baseUrlKey(actualUrl)) +} + +// CalculateBackoff takes a url and back's off exponentially, +// based on its knowledge of existing failures. +func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration { + return b.Backoff.Get(b.baseUrlKey(actualUrl)) +} + +func (b *URLBackoff) Sleep(d time.Duration) { + b.Backoff.Clock.Sleep(d) +} diff --git a/vendor/k8s.io/client-go/rest/versions.go b/vendor/k8s.io/client-go/rest/versions.go new file mode 100644 index 000000000..9d41812f2 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/versions.go @@ -0,0 +1,88 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rest + +import ( + "encoding/json" + "fmt" + "net/http" + "path" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + legacyAPIPath = "/api" + defaultAPIPath = "/apis" +) + +// TODO: Is this obsoleted by the discovery client? + +// ServerAPIVersions returns the GroupVersions supported by the API server. +// It creates a RESTClient based on the passed in config, but it doesn't rely +// on the Version and Codec of the config, because it uses AbsPath and +// takes the raw response. +func ServerAPIVersions(c *Config) (groupVersions []string, err error) { + transport, err := TransportFor(c) + if err != nil { + return nil, err + } + client := http.Client{Transport: transport} + + configCopy := *c + configCopy.GroupVersion = nil + configCopy.APIPath = "" + baseURL, _, err := defaultServerUrlFor(&configCopy) + if err != nil { + return nil, err + } + // Get the groupVersions exposed at /api + originalPath := baseURL.Path + baseURL.Path = path.Join(originalPath, legacyAPIPath) + resp, err := client.Get(baseURL.String()) + if err != nil { + return nil, err + } + var v metav1.APIVersions + defer resp.Body.Close() + err = json.NewDecoder(resp.Body).Decode(&v) + if err != nil { + return nil, fmt.Errorf("unexpected error: %v", err) + } + + groupVersions = append(groupVersions, v.Versions...) + // Get the groupVersions exposed at /apis + baseURL.Path = path.Join(originalPath, defaultAPIPath) + resp2, err := client.Get(baseURL.String()) + if err != nil { + return nil, err + } + var apiGroupList metav1.APIGroupList + defer resp2.Body.Close() + err = json.NewDecoder(resp2.Body).Decode(&apiGroupList) + if err != nil { + return nil, fmt.Errorf("unexpected error: %v", err) + } + + for _, g := range apiGroupList.Groups { + for _, gv := range g.Versions { + groupVersions = append(groupVersions, gv.GroupVersion) + } + } + + return groupVersions, nil +} diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go new file mode 100644 index 000000000..73bb63add --- /dev/null +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -0,0 +1,72 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioned + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/watch" +) + +// Decoder implements the watch.Decoder interface for io.ReadClosers that +// have contents which consist of a series of watchEvent objects encoded +// with the given streaming decoder. The internal objects will be then +// decoded by the embedded decoder. +type Decoder struct { + decoder streaming.Decoder + embeddedDecoder runtime.Decoder +} + +// NewDecoder creates an Decoder for the given writer and codec. 
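NewDecoder (continued below) is wired up by Request.Watch(), which wraps it in a StreamWatcher; callers never construct it directly and simply drain the resulting event channel. A hypothetical caller-side sketch, assuming an already-configured *rest.RESTClient; the Param name used to request a watch is illustrative only:

```go
package example

import (
	"fmt"

	"k8s.io/client-go/rest"
)

// watchPods is a hypothetical sketch: it starts a watch via the REST client
// and drains the decoded events produced by the StreamWatcher/Decoder chain.
func watchPods(c *rest.RESTClient) error {
	w, err := c.Get().
		Namespace("default").
		Resource("pods").
		Param("watch", "true"). // parameter name is illustrative
		Watch()
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		fmt.Printf("%s %T\n", event.Type, event.Object)
	}
	return nil
}
```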
+func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder { + return &Decoder{ + decoder: decoder, + embeddedDecoder: embeddedDecoder, + } +} + +// Decode blocks until it can return the next object in the reader. Returns an error +// if the reader is closed or an object can't be decoded. +func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { + var got metav1.WatchEvent + res, _, err := d.decoder.Decode(nil, &got) + if err != nil { + return "", nil, err + } + if res != &got { + return "", nil, fmt.Errorf("unable to decode to metav1.Event") + } + switch got.Type { + case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error): + default: + return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) + } + + obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw) + if err != nil { + return "", nil, fmt.Errorf("unable to decode watch event: %v", err) + } + return watch.EventType(got.Type), obj, nil +} + +// Close closes the underlying r. +func (d *Decoder) Close() { + d.decoder.Close() +} diff --git a/vendor/k8s.io/client-go/rest/watch/encoder.go b/vendor/k8s.io/client-go/rest/watch/encoder.go new file mode 100644 index 000000000..e55aa12d9 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/watch/encoder.go @@ -0,0 +1,56 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioned + +import ( + "encoding/json" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/watch" +) + +// Encoder serializes watch.Events into io.Writer. The internal objects +// are encoded using embedded encoder, and the outer Event is serialized +// using encoder. +// TODO: this type is only used by tests +type Encoder struct { + encoder streaming.Encoder + embeddedEncoder runtime.Encoder +} + +func NewEncoder(encoder streaming.Encoder, embeddedEncoder runtime.Encoder) *Encoder { + return &Encoder{ + encoder: encoder, + embeddedEncoder: embeddedEncoder, + } +} + +// Encode writes an event to the writer. Returns an error +// if the writer is closed or an object can't be encoded. +func (e *Encoder) Encode(event *watch.Event) error { + data, err := runtime.Encode(e.embeddedEncoder, event.Object) + if err != nil { + return err + } + // FIXME: get rid of json.RawMessage. + return e.encoder.Encode(&metav1.WatchEvent{ + Type: string(event.Type), + Object: runtime.RawExtension{Raw: json.RawMessage(data)}, + }) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go new file mode 100644 index 000000000..43e26487c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go @@ -0,0 +1,183 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" +) + +func init() { + sDec, _ := base64.StdEncoding.DecodeString("REDACTED+") + redactedBytes = []byte(string(sDec)) +} + +// IsConfigEmpty returns true if the config is empty. +func IsConfigEmpty(config *Config) bool { + return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 && + len(config.CurrentContext) == 0 && + len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors && + len(config.Extensions) == 0 +} + +// MinifyConfig read the current context and uses that to keep only the relevant pieces of config +// This is useful for making secrets based on kubeconfig files +func MinifyConfig(config *Config) error { + if len(config.CurrentContext) == 0 { + return errors.New("current-context must exist in order to minify") + } + + currContext, exists := config.Contexts[config.CurrentContext] + if !exists { + return fmt.Errorf("cannot locate context %v", config.CurrentContext) + } + + newContexts := map[string]*Context{} + newContexts[config.CurrentContext] = currContext + + newClusters := map[string]*Cluster{} + if len(currContext.Cluster) > 0 { + if _, exists := config.Clusters[currContext.Cluster]; !exists { + return fmt.Errorf("cannot locate cluster %v", currContext.Cluster) + } + + newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster] + } + + newAuthInfos := map[string]*AuthInfo{} + if len(currContext.AuthInfo) > 0 { + if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists { + return fmt.Errorf("cannot locate user %v", currContext.AuthInfo) + } + + newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo] + } + + config.AuthInfos = newAuthInfos + config.Clusters = newClusters + config.Contexts = newContexts + + return nil +} + +var redactedBytes []byte + +// Flatten redacts raw data entries from the config object for a human-readable view. +func ShortenConfig(config *Config) { + // trick json encoder into printing a human readable string in the raw data + // by base64 decoding what we want to print. 
Relies on implementation of + // http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte + for key, authInfo := range config.AuthInfos { + if len(authInfo.ClientKeyData) > 0 { + authInfo.ClientKeyData = redactedBytes + } + if len(authInfo.ClientCertificateData) > 0 { + authInfo.ClientCertificateData = redactedBytes + } + config.AuthInfos[key] = authInfo + } + for key, cluster := range config.Clusters { + if len(cluster.CertificateAuthorityData) > 0 { + cluster.CertificateAuthorityData = redactedBytes + } + config.Clusters[key] = cluster + } +} + +// Flatten changes the config object into a self contained config (useful for making secrets) +func FlattenConfig(config *Config) error { + for key, authInfo := range config.AuthInfos { + baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "") + if err != nil { + return err + } + + if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil { + return err + } + if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil { + return err + } + + config.AuthInfos[key] = authInfo + } + for key, cluster := range config.Clusters { + baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "") + if err != nil { + return err + } + + if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil { + return err + } + + config.Clusters[key] = cluster + } + + return nil +} + +func FlattenContent(path *string, contents *[]byte, baseDir string) error { + if len(*path) != 0 { + if len(*contents) > 0 { + return errors.New("cannot have values for both path and contents") + } + + var err error + absPath := ResolvePath(*path, baseDir) + *contents, err = ioutil.ReadFile(absPath) + if err != nil { + return err + } + + *path = "" + } + + return nil +} + +// ResolvePath returns the path as an absolute paths, relative to the given base directory +func ResolvePath(path string, base string) string { + // Don't resolve empty paths + if len(path) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(path) { + return filepath.Join(base, path) + } + } + + return path +} + +func MakeAbs(path, base string) (string, error) { + if filepath.IsAbs(path) { + return path, nil + } + if len(base) == 0 { + cwd, err := os.Getwd() + if err != nil { + return "", err + } + base = cwd + } + return filepath.Join(base, path), nil +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go new file mode 100644 index 000000000..2eec3881c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "kubeconfig" group +var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + ) + return nil +} + +func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } +func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *Config) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go new file mode 100644 index 000000000..76090c6f5 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -0,0 +1,185 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// Where possible, json tags match the cli argument names. +// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. + +// Config holds the information needed to build connect to remote kubernetes clusters as a given user +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type Config struct { + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +optional + Kind string `json:"kind,omitempty"` + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Preferences holds general information to be use for cli interactions + Preferences Preferences `json:"preferences"` + // Clusters is a map of referencable names to cluster configs + Clusters map[string]*Cluster `json:"clusters"` + // AuthInfos is a map of referencable names to user configs + AuthInfos map[string]*AuthInfo `json:"users"` + // Contexts is a map of referencable names to context configs + Contexts map[string]*Context `json:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `json:"current-context"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type Preferences struct { + // +optional + Colors bool `json:"colors,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. + // +optional + CertificateAuthority string `json:"certificate-authority,omitempty"` + // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. +type AuthInfo struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // ClientCertificate is the path to a client cert file for TLS. + // +optional + ClientCertificate string `json:"client-certificate,omitempty"` + // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate + // +optional + ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + // ClientKey is the path to a client key file for TLS. + // +optional + ClientKey string `json:"client-key,omitempty"` + // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey + // +optional + ClientKeyData []byte `json:"client-key-data,omitempty"` + // Token is the bearer token for authentication to the kubernetes cluster. + // +optional + Token string `json:"token,omitempty"` + // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. + // +optional + TokenFile string `json:"tokenFile,omitempty"` + // Impersonate is the username to act-as. + // +optional + Impersonate string `json:"act-as,omitempty"` + // ImpersonateGroups is the groups to imperonate. + // +optional + ImpersonateGroups []string `json:"act-as-groups,omitempty"` + // ImpersonateUserExtra contains additional information for impersonated user. + // +optional + ImpersonateUserExtra map[string][]string `json:"act-as-user-extra,omitempty"` + // Username is the username for basic authentication to the kubernetes cluster. 
+ // +optional + Username string `json:"username,omitempty"` + // Password is the password for basic authentication to the kubernetes cluster. + // +optional + Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. + // +optional + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + // +optional + Namespace string `json:"namespace,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + // +optional + Config map[string]string `json:"config,omitempty"` +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewConfig() *Config { + return &Config{ + Preferences: *NewPreferences(), + Clusters: make(map[string]*Cluster), + AuthInfos: make(map[string]*AuthInfo), + Contexts: make(map[string]*Context), + Extensions: make(map[string]runtime.Object), + } +} + +// NewContext is a convenience function that returns a new Context +// object with non-nil maps +func NewContext() *Context { + return &Context{Extensions: make(map[string]runtime.Object)} +} + +// NewCluster is a convenience function that returns a new Cluster +// object with non-nil maps +func NewCluster() *Cluster { + return &Cluster{Extensions: make(map[string]runtime.Object)} +} + +// NewAuthInfo is a convenience function that returns a new AuthInfo +// object with non-nil maps +func NewAuthInfo() *AuthInfo { + return &AuthInfo{ + Extensions: make(map[string]runtime.Object), + ImpersonateUserExtra: make(map[string][]string), + } +} + +// NewPreferences is a convenience function that returns a new +// Preferences object with non-nil maps +func NewPreferences() *Preferences { + return &Preferences{Extensions: make(map[string]runtime.Object)} +} diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go new file mode 100644 index 000000000..a01306c65 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package metrics provides abstractions for registering which metrics +// to record. +package metrics + +import ( + "net/url" + "sync" + "time" +) + +var registerMetrics sync.Once + +// LatencyMetric observes client latency partitioned by verb and url. +type LatencyMetric interface { + Observe(verb string, u url.URL, latency time.Duration) +} + +// ResultMetric counts response codes partitioned by method and host. +type ResultMetric interface { + Increment(code string, method string, host string) +} + +var ( + // RequestLatency is the latency metric that rest clients will update. + RequestLatency LatencyMetric = noopLatency{} + // RequestResult is the result metric that rest clients will update. + RequestResult ResultMetric = noopResult{} +) + +// Register registers metrics for the rest client to use. This can +// only be called once. +func Register(lm LatencyMetric, rm ResultMetric) { + registerMetrics.Do(func() { + RequestLatency = lm + RequestResult = rm + }) +} + +type noopLatency struct{} + +func (noopLatency) Observe(string, url.URL, time.Duration) {} + +type noopResult struct{} + +func (noopResult) Increment(string, string, string) {} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/doc.go b/vendor/k8s.io/client-go/tools/remotecommand/doc.go new file mode 100644 index 000000000..ac06a9cd3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package remotecommand adds support for executing commands in containers, +// with support for separate stdin, stdout, and stderr streams, as well as +// TTY. +package remotecommand // import "k8s.io/client-go/tools/remotecommand" diff --git a/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go new file mode 100644 index 000000000..360276b65 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package remotecommand + +import ( + "fmt" + "io" + "io/ioutil" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// errorStreamDecoder interprets the data on the error channel and creates a go error object from it. +type errorStreamDecoder interface { + decode(message []byte) error +} + +// watchErrorStream watches the errorStream for remote command error data, +// decodes it with the given errorStreamDecoder, sends the decoded error (or nil if the remote +// command exited successfully) to the returned error channel, and closes it. +// This function returns immediately. +func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error { + errorChan := make(chan error) + + go func() { + defer runtime.HandleCrash() + + message, err := ioutil.ReadAll(errorStream) + switch { + case err != nil && err != io.EOF: + errorChan <- fmt.Errorf("error reading from error stream: %s", err) + case len(message) > 0: + errorChan <- d.decode(message) + default: + errorChan <- nil + } + close(errorChan) + }() + + return errorChan +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go new file mode 100644 index 000000000..a90fab1fe --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -0,0 +1,178 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "net/http" + "net/url" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + "k8s.io/apimachinery/pkg/util/remotecommand" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/transport" +) + +// StreamOptions holds information pertaining to the current streaming session: supported stream +// protocols, input/output streams, if the client is requesting a TTY, and a terminal size queue to +// support terminal resizing. +type StreamOptions struct { + SupportedProtocols []string + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + Tty bool + TerminalSizeQueue TerminalSizeQueue +} + +// Executor is an interface for transporting shell-style streams. +type Executor interface { + // Stream initiates the transport of the standard shell streams. It will transport any + // non-nil stream to a remote system, and return an error if a problem occurs. If tty + // is set, the stderr stream is not used (raw TTY manages stdout and stderr over the + // stdout stream). + Stream(options StreamOptions) error +} + +// StreamExecutor supports the ability to dial an httpstream connection and the ability to +// run a command line stream protocol over that dialer. +type StreamExecutor interface { + Executor + httpstream.Dialer +} + +// streamExecutor handles transporting standard shell streams over an httpstream connection. 
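The streamExecutor (defined just below) is normally reached through NewExecutor: build it from a rest.Config plus an exec or attach URL, then drive it with StreamOptions. A hedged sketch in which the config and URL are assumed to come from elsewhere (for example a kubeconfig and a pods/{name}/exec endpoint):

```go
package example

import (
	"net/url"
	"os"

	remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// execSketch is hypothetical: it wires local stdin/stdout/stderr to a remote
// command using the SPDY executor defined in this file.
func execSketch(config *restclient.Config, execURL *url.URL) error {
	exec, err := remotecommand.NewExecutor(config, "POST", execURL)
	if err != nil {
		return err
	}
	return exec.Stream(remotecommand.StreamOptions{
		SupportedProtocols: []string{
			remotecommandconsts.StreamProtocolV4Name,
			remotecommandconsts.StreamProtocolV2Name,
		},
		Stdin:  os.Stdin,
		Stdout: os.Stdout,
		Stderr: os.Stderr,
		Tty:    false,
	})
}
```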
+type streamExecutor struct { + upgrader httpstream.UpgradeRoundTripper + transport http.RoundTripper + + method string + url *url.URL +} + +// NewExecutor connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams. The current implementation uses SPDY, +// but this could be replaced with HTTP/2 once it's available, or something else. +// TODO: the common code between this and portforward could be abstracted. +func NewExecutor(config *restclient.Config, method string, url *url.URL) (StreamExecutor, error) { + tlsConfig, err := restclient.TLSConfigFor(config) + if err != nil { + return nil, err + } + + upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true) + wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) + if err != nil { + return nil, err + } + + return &streamExecutor{ + upgrader: upgradeRoundTripper, + transport: wrapper, + method: method, + url: url, + }, nil +} + +// NewStreamExecutor upgrades the request so that it supports multiplexed bidirectional +// streams. This method takes a stream upgrader and an optional function that is invoked +// to wrap the round tripper. This method may be used by clients that are lower level than +// Kubernetes clients or need to provide their own upgrade round tripper. +func NewStreamExecutor(upgrader httpstream.UpgradeRoundTripper, fn func(http.RoundTripper) http.RoundTripper, method string, url *url.URL) (StreamExecutor, error) { + rt := http.RoundTripper(upgrader) + if fn != nil { + rt = fn(rt) + } + return &streamExecutor{ + upgrader: upgrader, + transport: rt, + method: method, + url: url, + }, nil +} + +// Dial opens a connection to a remote server and attempts to negotiate a SPDY +// connection. Upon success, it returns the connection and the protocol +// selected by the server. +func (e *streamExecutor) Dial(protocols ...string) (httpstream.Connection, string, error) { + rt := transport.DebugWrappers(e.transport) + + // TODO the client probably shouldn't be created here, as it doesn't allow + // flexibility to allow callers to configure it. + client := &http.Client{Transport: rt} + + req, err := http.NewRequest(e.method, e.url.String(), nil) + if err != nil { + return nil, "", fmt.Errorf("error creating request: %v", err) + } + for i := range protocols { + req.Header.Add(httpstream.HeaderProtocolVersion, protocols[i]) + } + + resp, err := client.Do(req) + if err != nil { + return nil, "", fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + + conn, err := e.upgrader.NewConnection(resp) + if err != nil { + return nil, "", err + } + + return conn, resp.Header.Get(httpstream.HeaderProtocolVersion), nil +} + +type streamCreator interface { + CreateStream(headers http.Header) (httpstream.Stream, error) +} + +type streamProtocolHandler interface { + stream(conn streamCreator) error +} + +// Stream opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects. +func (e *streamExecutor) Stream(options StreamOptions) error { + conn, protocol, err := e.Dial(options.SupportedProtocols...) 
+ if err != nil { + return err + } + defer conn.Close() + + var streamer streamProtocolHandler + + switch protocol { + case remotecommand.StreamProtocolV4Name: + streamer = newStreamProtocolV4(options) + case remotecommand.StreamProtocolV3Name: + streamer = newStreamProtocolV3(options) + case remotecommand.StreamProtocolV2Name: + streamer = newStreamProtocolV2(options) + case "": + glog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) + fallthrough + case remotecommand.StreamProtocolV1Name: + streamer = newStreamProtocolV1(options) + } + + return streamer.stream(conn) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/resize.go b/vendor/k8s.io/client-go/tools/remotecommand/resize.go new file mode 100644 index 000000000..c838f21ba --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/resize.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +// TerminalSize and TerminalSizeQueue was a part of k8s.io/kubernetes/pkg/util/term +// and were moved in order to decouple client from other term dependencies + +// TerminalSize represents the width and height of a terminal. +type TerminalSize struct { + Width uint16 + Height uint16 +} + +// TerminalSizeQueue is capable of returning terminal resize events as they occur. +type TerminalSizeQueue interface { + // Next returns the new terminal size after the terminal has been resized. It returns nil when + // monitoring has been stopped. + Next() *TerminalSize +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v1.go b/vendor/k8s.io/client-go/tools/remotecommand/v1.go new file mode 100644 index 000000000..1db917c0b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v1.go @@ -0,0 +1,160 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/client-go/pkg/api/v1" +) + +// streamProtocolV1 implements the first version of the streaming exec & attach +// protocol. This version has some bugs, such as not being able to detect when +// non-interactive stdin data has ended. See http://issues.k8s.io/13394 and +// http://issues.k8s.io/13395 for more details. 
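+//
+// In every protocol version each standard stream is carried on its own SPDY
+// sub-stream tagged with a "streamType" header. A sketch of the per-stream
+// set-up performed below (conn is the negotiated streamCreator):
+//
+//	headers := http.Header{}
+//	headers.Set(v1.StreamType, v1.StreamTypeStdin)
+//	remoteStdin, err := conn.CreateStream(headers)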
+type streamProtocolV1 struct { + StreamOptions + + errorStream httpstream.Stream + remoteStdin httpstream.Stream + remoteStdout httpstream.Stream + remoteStderr httpstream.Stream +} + +var _ streamProtocolHandler = &streamProtocolV1{} + +func newStreamProtocolV1(options StreamOptions) streamProtocolHandler { + return &streamProtocolV1{ + StreamOptions: options, + } +} + +func (p *streamProtocolV1) stream(conn streamCreator) error { + doneChan := make(chan struct{}, 2) + errorChan := make(chan error) + + cp := func(s string, dst io.Writer, src io.Reader) { + glog.V(6).Infof("Copying %s", s) + defer glog.V(6).Infof("Done copying %s", s) + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + glog.Errorf("Error copying %s: %v", s, err) + } + if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr { + doneChan <- struct{}{} + } + } + + // set up all the streams first + var err error + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeError) + p.errorStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.errorStream.Reset() + + // Create all the streams first, then start the copy goroutines. The server doesn't start its copy + // goroutines until it's received all of the streams. If the client creates the stdin stream and + // immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the + // spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't + // getting processed because the server hasn't started its copying, and it won't do that until it + // gets all the streams. By creating all the streams first, we ensure that the server is ready to + // process data before the client starts sending any. See https://issues.k8s.io/16373 for more info. + if p.Stdin != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdin) + p.remoteStdin, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStdin.Reset() + } + + if p.Stdout != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdout) + p.remoteStdout, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStdout.Reset() + } + + if p.Stderr != nil && !p.Tty { + headers.Set(v1.StreamType, v1.StreamTypeStderr) + p.remoteStderr, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStderr.Reset() + } + + // now that all the streams have been created, proceed with reading & copying + + // always read from errorStream + go func() { + message, err := ioutil.ReadAll(p.errorStream) + if err != nil && err != io.EOF { + errorChan <- fmt.Errorf("Error reading from error stream: %s", err) + return + } + if len(message) > 0 { + errorChan <- fmt.Errorf("Error executing remote command: %s", message) + return + } + }() + + if p.Stdin != nil { + // TODO this goroutine will never exit cleanly (the io.Copy never unblocks) + // because stdin is not closed until the process exits. If we try to call + // stdin.Close(), it returns no error but doesn't unblock the copy. It will + // exit when the process exits, instead. 
+ go cp(v1.StreamTypeStdin, p.remoteStdin, p.Stdin) + } + + waitCount := 0 + completedStreams := 0 + + if p.Stdout != nil { + waitCount++ + go cp(v1.StreamTypeStdout, p.Stdout, p.remoteStdout) + } + + if p.Stderr != nil && !p.Tty { + waitCount++ + go cp(v1.StreamTypeStderr, p.Stderr, p.remoteStderr) + } + +Loop: + for { + select { + case <-doneChan: + completedStreams++ + if completedStreams == waitCount { + break Loop + } + case err := <-errorChan: + return err + } + } + + return nil +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v2.go b/vendor/k8s.io/client-go/tools/remotecommand/v2.go new file mode 100644 index 000000000..95346a439 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v2.go @@ -0,0 +1,195 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "sync" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/pkg/api/v1" +) + +// streamProtocolV2 implements version 2 of the streaming protocol for attach +// and exec. The original streaming protocol was metav1. As a result, this +// version is referred to as version 2, even though it is the first actual +// numbered version. +type streamProtocolV2 struct { + StreamOptions + + errorStream io.Reader + remoteStdin io.ReadWriteCloser + remoteStdout io.Reader + remoteStderr io.Reader +} + +var _ streamProtocolHandler = &streamProtocolV2{} + +func newStreamProtocolV2(options StreamOptions) streamProtocolHandler { + return &streamProtocolV2{ + StreamOptions: options, + } +} + +func (p *streamProtocolV2) createStreams(conn streamCreator) error { + var err error + headers := http.Header{} + + // set up error stream + headers.Set(v1.StreamType, v1.StreamTypeError) + p.errorStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + + // set up stdin stream + if p.Stdin != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdin) + p.remoteStdin, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + + // set up stdout stream + if p.Stdout != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdout) + p.remoteStdout, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + + // set up stderr stream + if p.Stderr != nil && !p.Tty { + headers.Set(v1.StreamType, v1.StreamTypeStderr) + p.remoteStderr, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + return nil +} + +func (p *streamProtocolV2) copyStdin() { + if p.Stdin != nil { + var once sync.Once + + // copy from client's stdin to container's stdin + go func() { + defer runtime.HandleCrash() + + // if p.stdin is noninteractive, p.g. `echo abc | kubectl exec -i -- cat`, make sure + // we close remoteStdin as soon as the copy from p.stdin to remoteStdin finishes. Otherwise + // the executed command will remain running. 
+ defer once.Do(func() { p.remoteStdin.Close() }) + + if _, err := io.Copy(p.remoteStdin, p.Stdin); err != nil { + runtime.HandleError(err) + } + }() + + // read from remoteStdin until the stream is closed. this is essential to + // be able to exit interactive sessions cleanly and not leak goroutines or + // hang the client's terminal. + // + // TODO we aren't using go-dockerclient any more; revisit this to determine if it's still + // required by engine-api. + // + // go-dockerclient's current hijack implementation + // (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564) + // waits for all three streams (stdin/stdout/stderr) to finish copying + // before returning. When hijack finishes copying stdout/stderr, it calls + // Close() on its side of remoteStdin, which allows this copy to complete. + // When that happens, we must Close() on our side of remoteStdin, to + // allow the copy in hijack to complete, and hijack to return. + go func() { + defer runtime.HandleCrash() + defer once.Do(func() { p.remoteStdin.Close() }) + + // this "copy" doesn't actually read anything - it's just here to wait for + // the server to close remoteStdin. + if _, err := io.Copy(ioutil.Discard, p.remoteStdin); err != nil { + runtime.HandleError(err) + } + }() + } +} + +func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) { + if p.Stdout == nil { + return + } + + wg.Add(1) + go func() { + defer runtime.HandleCrash() + defer wg.Done() + + if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil { + runtime.HandleError(err) + } + }() +} + +func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) { + if p.Stderr == nil || p.Tty { + return + } + + wg.Add(1) + go func() { + defer runtime.HandleCrash() + defer wg.Done() + + if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil { + runtime.HandleError(err) + } + }() +} + +func (p *streamProtocolV2) stream(conn streamCreator) error { + if err := p.createStreams(conn); err != nil { + return err + } + + // now that all the streams have been created, proceed with reading & copying + + errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{}) + + p.copyStdin() + + var wg sync.WaitGroup + p.copyStdout(&wg) + p.copyStderr(&wg) + + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for errorStream to finish reading with an error or nil + return <-errorChan +} + +// errorDecoderV2 interprets the error channel data as plain text. +type errorDecoderV2 struct{} + +func (d *errorDecoderV2) decode(message []byte) error { + return fmt.Errorf("error executing remote command: %s", message) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v3.go b/vendor/k8s.io/client-go/tools/remotecommand/v3.go new file mode 100644 index 000000000..03b9e2a68 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v3.go @@ -0,0 +1,111 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package remotecommand + +import ( + "encoding/json" + "io" + "net/http" + "sync" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/pkg/api/v1" +) + +// streamProtocolV3 implements version 3 of the streaming protocol for attach +// and exec. This version adds support for resizing the container's terminal. +type streamProtocolV3 struct { + *streamProtocolV2 + + resizeStream io.Writer +} + +var _ streamProtocolHandler = &streamProtocolV3{} + +func newStreamProtocolV3(options StreamOptions) streamProtocolHandler { + return &streamProtocolV3{ + streamProtocolV2: newStreamProtocolV2(options).(*streamProtocolV2), + } +} + +func (p *streamProtocolV3) createStreams(conn streamCreator) error { + // set up the streams from v2 + if err := p.streamProtocolV2.createStreams(conn); err != nil { + return err + } + + // set up resize stream + if p.Tty { + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeResize) + var err error + p.resizeStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + + return nil +} + +func (p *streamProtocolV3) handleResizes() { + if p.resizeStream == nil || p.TerminalSizeQueue == nil { + return + } + go func() { + defer runtime.HandleCrash() + + encoder := json.NewEncoder(p.resizeStream) + for { + size := p.TerminalSizeQueue.Next() + if size == nil { + return + } + if err := encoder.Encode(&size); err != nil { + runtime.HandleError(err) + } + } + }() +} + +func (p *streamProtocolV3) stream(conn streamCreator) error { + if err := p.createStreams(conn); err != nil { + return err + } + + // now that all the streams have been created, proceed with reading & copying + + errorChan := watchErrorStream(p.errorStream, &errorDecoderV3{}) + + p.handleResizes() + + p.copyStdin() + + var wg sync.WaitGroup + p.copyStdout(&wg) + p.copyStderr(&wg) + + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for errorStream to finish reading with an error or nil + return <-errorChan +} + +type errorDecoderV3 struct { + errorDecoderV2 +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v4.go b/vendor/k8s.io/client-go/tools/remotecommand/v4.go new file mode 100644 index 000000000..69ca934a0 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v4.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "sync" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/client-go/util/exec" +) + +// streamProtocolV4 implements version 4 of the streaming protocol for attach +// and exec. This version adds support for exit codes on the error stream through +// the use of metav1.Status instead of plain text messages. 
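+//
+// For example, a non-zero exit arrives on the error channel as a
+// json-marshaled metav1.Status roughly of this shape (a sketch; the concrete
+// values are produced by the server):
+//
+//	{"status":"Failure","reason":"NonZeroExitCode",
+//	 "details":{"causes":[{"reason":"ExitCode","message":"42"}]}}
+//
+// errorDecoderV4 below converts this into an exec.CodeExitError with Code 42.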
+type streamProtocolV4 struct { + *streamProtocolV3 +} + +var _ streamProtocolHandler = &streamProtocolV4{} + +func newStreamProtocolV4(options StreamOptions) streamProtocolHandler { + return &streamProtocolV4{ + streamProtocolV3: newStreamProtocolV3(options).(*streamProtocolV3), + } +} + +func (p *streamProtocolV4) createStreams(conn streamCreator) error { + return p.streamProtocolV3.createStreams(conn) +} + +func (p *streamProtocolV4) handleResizes() { + p.streamProtocolV3.handleResizes() +} + +func (p *streamProtocolV4) stream(conn streamCreator) error { + if err := p.createStreams(conn); err != nil { + return err + } + + // now that all the streams have been created, proceed with reading & copying + + errorChan := watchErrorStream(p.errorStream, &errorDecoderV4{}) + + p.handleResizes() + + p.copyStdin() + + var wg sync.WaitGroup + p.copyStdout(&wg) + p.copyStderr(&wg) + + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for errorStream to finish reading with an error or nil + return <-errorChan +} + +// errorDecoderV4 interprets the json-marshaled metav1.Status on the error channel +// and creates an exec.ExitError from it. +type errorDecoderV4 struct{} + +func (d *errorDecoderV4) decode(message []byte) error { + status := metav1.Status{} + err := json.Unmarshal(message, &status) + if err != nil { + return fmt.Errorf("error stream protocol error: %v in %q", err, string(message)) + } + switch status.Status { + case metav1.StatusSuccess: + return nil + case metav1.StatusFailure: + if status.Reason == remotecommand.NonZeroExitCodeReason { + if status.Details == nil { + return errors.New("error stream protocol error: details must be set") + } + for i := range status.Details.Causes { + c := &status.Details.Causes[i] + if c.Type != remotecommand.ExitCodeCauseType { + continue + } + + rc, err := strconv.ParseUint(c.Message, 10, 8) + if err != nil { + return fmt.Errorf("error stream protocol error: invalid exit code value %q", c.Message) + } + return exec.CodeExitError{ + Err: fmt.Errorf("command terminated with exit code %d", rc), + Code: int(rc), + } + } + + return fmt.Errorf("error stream protocol error: no %s cause given", remotecommand.ExitCodeCauseType) + } + default: + return errors.New("error stream protocol error: unknown error") + } + + return fmt.Errorf(status.Message) +} diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go new file mode 100644 index 000000000..8d76def34 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -0,0 +1,88 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "fmt" + "net" + "net/http" + "sync" + "time" + + utilnet "k8s.io/apimachinery/pkg/util/net" +) + +// TlsTransportCache caches TLS http.RoundTrippers different configurations. The +// same RoundTripper will be returned for configs with identical TLS options If +// the config has no custom TLS options, http.DefaultTransport is returned. 
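+//
+// In practice this means repeated calls to New with equivalent TLS settings
+// share a single underlying *http.Transport (a sketch; cfg is an illustrative
+// *transport.Config):
+//
+//	rt1, _ := transport.New(cfg) // builds and caches an *http.Transport
+//	rt2, _ := transport.New(cfg) // identical TLS key, reuses the cached transport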
+type tlsTransportCache struct { + mu sync.Mutex + transports map[string]*http.Transport +} + +const idleConnsPerHost = 25 + +var tlsCache = &tlsTransportCache{transports: make(map[string]*http.Transport)} + +func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { + key, err := tlsConfigKey(config) + if err != nil { + return nil, err + } + + // Ensure we only create a single transport for the given TLS options + c.mu.Lock() + defer c.mu.Unlock() + + // See if we already have a custom transport for this config + if t, ok := c.transports[key]; ok { + return t, nil + } + + // Get the TLS options for this client config + tlsConfig, err := TLSConfigFor(config) + if err != nil { + return nil, err + } + // The options didn't require a custom TLS config + if tlsConfig == nil { + return http.DefaultTransport, nil + } + + // Cache a single transport for these options + c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + MaxIdleConnsPerHost: idleConnsPerHost, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }) + return c.transports[key], nil +} + +// tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor +func tlsConfigKey(c *Config) (string, error) { + // Make sure ca/key/cert content is loaded + if err := loadTLSFiles(c); err != nil { + return "", err + } + // Only include the things that actually affect the tls.Config + return fmt.Sprintf("%v/%x/%x/%x", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData), nil +} diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go new file mode 100644 index 000000000..820594ba3 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/config.go @@ -0,0 +1,95 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import "net/http" + +// Config holds various options for establishing a transport. +type Config struct { + // UserAgent is an optional field that specifies the caller of this + // request. + UserAgent string + + // The base TLS configuration for this transport. + TLS TLSConfig + + // Username and password for basic authentication + Username string + Password string + + // Bearer token for authentication + BearerToken string + + // Impersonate is the config that this Config will impersonate using + Impersonate ImpersonationConfig + + // Transport may be used for custom HTTP behavior. This attribute may + // not be specified with the TLS client certificate options. Use + // WrapTransport for most client level operations. + Transport http.RoundTripper + + // WrapTransport will be invoked for custom HTTP behavior after the + // underlying transport is initialized (either the transport created + // from TLSClientConfig, Transport, or http.DefaultTransport). The + // config may layer other RoundTrippers on top of the returned + // RoundTripper. 
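+	// For example (a sketch; loggingRoundTripper is a hypothetical wrapper
+	// type, not part of this package):
+	//
+	//	cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
+	//		return &loggingRoundTripper{delegate: rt}
+	//	}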
+ WrapTransport func(rt http.RoundTripper) http.RoundTripper +} + +// ImpersonationConfig has all the available impersonation options +type ImpersonationConfig struct { + // UserName matches user.Info.GetName() + UserName string + // Groups matches user.Info.GetGroups() + Groups []string + // Extra matches user.Info.GetExtra() + Extra map[string][]string +} + +// HasCA returns whether the configuration has a certificate authority or not. +func (c *Config) HasCA() bool { + return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0 +} + +// HasBasicAuth returns whether the configuration has basic authentication or not. +func (c *Config) HasBasicAuth() bool { + return len(c.Username) != 0 +} + +// HasTokenAuth returns whether the configuration has token authentication or not. +func (c *Config) HasTokenAuth() bool { + return len(c.BearerToken) != 0 +} + +// HasCertAuth returns whether the configuration has certificate authentication or not. +func (c *Config) HasCertAuth() bool { + return len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0 +} + +// TLSConfig holds the information needed to set up a TLS transport. +type TLSConfig struct { + CAFile string // Path of the PEM-encoded server trusted root certificates. + CertFile string // Path of the PEM-encoded client certificate. + KeyFile string // Path of the PEM-encoded client key. + + Insecure bool // Server should be accessed without verifying the certificate. For testing only. + ServerName string // Override for the server name passed to the server for SNI and used to verify certificates. + + CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supercedes CAFile. + CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. + KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. +} diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go new file mode 100644 index 000000000..c728b1877 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -0,0 +1,424 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/golang/glog" + + utilnet "k8s.io/apimachinery/pkg/util/net" +) + +// HTTPWrappersForConfig wraps a round tripper with any relevant layered +// behavior from the config. Exposed to allow more clients that need HTTP-like +// behavior but then must hijack the underlying connection (like WebSocket or +// HTTP2 clients). Pure HTTP clients should use the RoundTripper returned from +// New. 
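+//
+// For example, a client that needs to hijack the underlying connection for a
+// SPDY or WebSocket upgrade can layer the config's auth and user-agent
+// handling over its own upgrade round tripper (a sketch; upgrader is an
+// illustrative httpstream.UpgradeRoundTripper):
+//
+//	rt, err := transport.HTTPWrappersForConfig(cfg, upgrader)
+//	if err != nil {
+//		return err
+//	}
+//	client := &http.Client{Transport: rt}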
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { + if config.WrapTransport != nil { + rt = config.WrapTransport(rt) + } + + rt = DebugWrappers(rt) + + // Set authentication wrappers + switch { + case config.HasBasicAuth() && config.HasTokenAuth(): + return nil, fmt.Errorf("username/password or bearer token may be set, but not both") + case config.HasTokenAuth(): + rt = NewBearerAuthRoundTripper(config.BearerToken, rt) + case config.HasBasicAuth(): + rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) + } + if len(config.UserAgent) > 0 { + rt = NewUserAgentRoundTripper(config.UserAgent, rt) + } + if len(config.Impersonate.UserName) > 0 || + len(config.Impersonate.Groups) > 0 || + len(config.Impersonate.Extra) > 0 { + rt = NewImpersonatingRoundTripper(config.Impersonate, rt) + } + return rt, nil +} + +// DebugWrappers wraps a round tripper and logs based on the current log level. +func DebugWrappers(rt http.RoundTripper) http.RoundTripper { + switch { + case bool(glog.V(9)): + rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) + case bool(glog.V(8)): + rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) + case bool(glog.V(7)): + rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) + case bool(glog.V(6)): + rt = newDebuggingRoundTripper(rt, debugURLTiming) + } + + return rt +} + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +type authProxyRoundTripper struct { + username string + groups []string + extra map[string][]string + + rt http.RoundTripper +} + +// NewAuthProxyRoundTripper provides a roundtripper which will add auth proxy fields to requests for +// authentication terminating proxy cases +// assuming you pull the user from the context: +// username is the user.Info.GetName() of the user +// groups is the user.Info.GetGroups() of the user +// extra is the user.Info.GetExtra() of the user +// extra can contain any additional information that the authenticator +// thought was interesting, for example authorization scopes. +// In order to faithfully round-trip through an impersonation flow, these keys +// MUST be lowercase. +func NewAuthProxyRoundTripper(username string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper { + return &authProxyRoundTripper{ + username: username, + groups: groups, + extra: extra, + rt: rt, + } +} + +func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = utilnet.CloneRequest(req) + SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra) + + return rt.rt.RoundTrip(req) +} + +// SetAuthProxyHeaders stomps the auth proxy header fields. It mutates its argument. 
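+//
+// For example, username "jane", groups ["system:developers"] and
+// extra map[string][]string{"scopes": {"view"}} result in (a sketch of the
+// resulting headers):
+//
+//	X-Remote-User: jane
+//	X-Remote-Group: system:developers
+//	X-Remote-Extra-scopes: view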
+func SetAuthProxyHeaders(req *http.Request, username string, groups []string, extra map[string][]string) { + req.Header.Del("X-Remote-User") + req.Header.Del("X-Remote-Group") + for key := range req.Header { + if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) { + req.Header.Del(key) + } + } + + req.Header.Set("X-Remote-User", username) + for _, group := range groups { + req.Header.Add("X-Remote-Group", group) + } + for key, values := range extra { + for _, value := range values { + req.Header.Add("X-Remote-Extra-"+key, value) + } + } +} + +func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +type userAgentRoundTripper struct { + agent string + rt http.RoundTripper +} + +func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { + return &userAgentRoundTripper{agent, rt} +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("User-Agent")) != 0 { + return rt.rt.RoundTrip(req) + } + req = utilnet.CloneRequest(req) + req.Header.Set("User-Agent", rt.agent) + return rt.rt.RoundTrip(req) +} + +func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +type basicAuthRoundTripper struct { + username string + password string + rt http.RoundTripper +} + +// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a +// request unless it has already been set. +func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper { + return &basicAuthRoundTripper{username, password, rt} +} + +func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + req = utilnet.CloneRequest(req) + req.SetBasicAuth(rt.username, rt.password) + return rt.rt.RoundTrip(req) +} + +func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +// These correspond to the headers used in pkg/apis/authentication. We don't want the package dependency, +// but you must not change the values. +const ( + // ImpersonateUserHeader is used to impersonate a particular user during an API server request + ImpersonateUserHeader = "Impersonate-User" + + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. + // It can be repeated multiplied times for multiple groups. + ImpersonateGroupHeader = "Impersonate-Group" + + // ImpersonateUserExtraHeaderPrefix is a prefix for a header used to impersonate an entry in the + // extra map[string][]string for user.Info. The key for the `extra` map is suffix. + // The same key can be repeated multiple times to have multiple elements in the slice under a single key. 
+ // For instance: + // Impersonate-Extra-Foo: one + // Impersonate-Extra-Foo: two + // results in extra["Foo"] = []string{"one", "two"} + ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" +) + +type impersonatingRoundTripper struct { + impersonate ImpersonationConfig + delegate http.RoundTripper +} + +// NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set. +func NewImpersonatingRoundTripper(impersonate ImpersonationConfig, delegate http.RoundTripper) http.RoundTripper { + return &impersonatingRoundTripper{impersonate, delegate} +} + +func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + // use the user header as marker for the rest. + if len(req.Header.Get(ImpersonateUserHeader)) != 0 { + return rt.delegate.RoundTrip(req) + } + req = utilnet.CloneRequest(req) + req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName) + + for _, group := range rt.impersonate.Groups { + req.Header.Add(ImpersonateGroupHeader, group) + } + for k, vv := range rt.impersonate.Extra { + for _, v := range vv { + req.Header.Add(ImpersonateUserExtraHeaderPrefix+k, v) + } + } + + return rt.delegate.RoundTrip(req) +} + +func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegate.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } + +type bearerAuthRoundTripper struct { + bearer string + rt http.RoundTripper +} + +// NewBearerAuthRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { + return &bearerAuthRoundTripper{bearer, rt} +} + +func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + + req = utilnet.CloneRequest(req) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) + return rt.rt.RoundTrip(req) +} + +func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +// requestInfo keeps track of information about a request/response combination +type requestInfo struct { + RequestHeaders http.Header + RequestVerb string + RequestURL string + + ResponseStatus string + ResponseHeaders http.Header + ResponseErr error + + Duration time.Duration +} + +// newRequestInfo creates a new RequestInfo based on an http request +func newRequestInfo(req *http.Request) *requestInfo { + return &requestInfo{ + RequestURL: req.URL.String(), + RequestVerb: req.Method, + RequestHeaders: req.Header, + } +} + +// complete adds information about the response to the requestInfo +func (r *requestInfo) complete(response *http.Response, err error) { + if err != nil { + r.ResponseErr = err + return + } + r.ResponseStatus = response.Status + r.ResponseHeaders = response.Header +} + +// toCurl returns a string that can be run as a command in a terminal (minus the body) +func (r *requestInfo) toCurl() string { + headers := "" + for key, values := range r.RequestHeaders { + for _, value := range values { + headers 
+= fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value)) + } + } + + return fmt.Sprintf("curl -k -v -X%s %s %s", r.RequestVerb, headers, r.RequestURL) +} + +// debuggingRoundTripper will display information about the requests passing +// through it based on what is configured +type debuggingRoundTripper struct { + delegatedRoundTripper http.RoundTripper + + levels map[debugLevel]bool +} + +type debugLevel int + +const ( + debugJustURL debugLevel = iota + debugURLTiming + debugCurlCommand + debugRequestHeaders + debugResponseStatus + debugResponseHeaders +) + +func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debuggingRoundTripper { + drt := &debuggingRoundTripper{ + delegatedRoundTripper: rt, + levels: make(map[debugLevel]bool, len(levels)), + } + for _, v := range levels { + drt.levels[v] = true + } + return drt +} + +func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + reqInfo := newRequestInfo(req) + + if rt.levels[debugJustURL] { + glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + } + if rt.levels[debugCurlCommand] { + glog.Infof("%s", reqInfo.toCurl()) + + } + if rt.levels[debugRequestHeaders] { + glog.Infof("Request Headers:") + for key, values := range reqInfo.RequestHeaders { + for _, value := range values { + glog.Infof(" %s: %s", key, value) + } + } + } + + startTime := time.Now() + response, err := rt.delegatedRoundTripper.RoundTrip(req) + reqInfo.Duration = time.Since(startTime) + + reqInfo.complete(response, err) + + if rt.levels[debugURLTiming] { + glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + } + if rt.levels[debugResponseStatus] { + glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + } + if rt.levels[debugResponseHeaders] { + glog.Infof("Response Headers:") + for key, values := range reqInfo.ResponseHeaders { + for _, value := range values { + glog.Infof(" %s: %s", key, value) + } + } + } + + return response, err +} + +func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper { + return rt.delegatedRoundTripper +} diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go new file mode 100644 index 000000000..15be0a3e6 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -0,0 +1,141 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package transport + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" +) + +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func New(config *Config) (http.RoundTripper, error) { + // Set transport level security + if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.TLS.Insecure) { + return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed") + } + + var ( + rt http.RoundTripper + err error + ) + + if config.Transport != nil { + rt = config.Transport + } else { + rt, err = tlsCache.get(config) + if err != nil { + return nil, err + } + } + + return HTTPWrappersForConfig(config, rt) +} + +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. +func TLSConfigFor(c *Config) (*tls.Config, error) { + if !(c.HasCA() || c.HasCertAuth() || c.TLS.Insecure) { + return nil, nil + } + if c.HasCA() && c.TLS.Insecure { + return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed") + } + if err := loadTLSFiles(c); err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: c.TLS.Insecure, + ServerName: c.TLS.ServerName, + } + + if c.HasCA() { + tlsConfig.RootCAs = rootCertPool(c.TLS.CAData) + } + + if c.HasCertAuth() { + cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData) + if err != nil { + return nil, err + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + return tlsConfig, nil +} + +// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. +func loadTLSFiles(c *Config) error { + var err error + c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile) + if err != nil { + return err + } + + c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile) + if err != nil { + return err + } + + c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". +// When caData is not empty, it will be the ONLY information used in the CertPool. +func rootCertPool(caData []byte) *x509.CertPool { + // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. 
It's difficult to build (see the go + // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values + // It doesn't allow trusting either/or, but hopefully that won't be an issue + if len(caData) == 0 { + return nil + } + + // if we have caData, use it + certPool := x509.NewCertPool() + certPool.AppendCertsFromPEM(caData) + return certPool +} diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go new file mode 100644 index 000000000..6854d4152 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -0,0 +1,215 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "math" + "math/big" + "net" + "time" +) + +const ( + rsaKeySize = 2048 + duration365d = time.Hour * 24 * 365 +) + +// Config containes the basic fields required for creating a certificate +type Config struct { + CommonName string + Organization []string + AltNames AltNames + Usages []x509.ExtKeyUsage +} + +// AltNames contains the domain names and IP addresses that will be added +// to the API Server's x509 certificate SubAltNames field. The values will +// be passed directly to the x509.Certificate object. 
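+//
+// For example (a sketch; the names and address are illustrative):
+//
+//	cert.AltNames{
+//		DNSNames: []string{"kubernetes", "kubernetes.default.svc"},
+//		IPs:      []net.IP{net.ParseIP("10.96.0.1")},
+//	}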
+type AltNames struct { + DNSNames []string + IPs []net.IP +} + +// NewPrivateKey creates an RSA private key +func NewPrivateKey() (*rsa.PrivateKey, error) { + return rsa.GenerateKey(cryptorand.Reader, rsaKeySize) +} + +// NewSelfSignedCACert creates a CA certificate +func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, error) { + now := time.Now() + tmpl := x509.Certificate{ + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + NotBefore: now.UTC(), + NotAfter: now.Add(duration365d * 10).UTC(), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + } + + certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) + if err != nil { + return nil, err + } + return x509.ParseCertificate(certDERBytes) +} + +// NewSignedCert creates a signed certificate using the given CA certificate and key +func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, error) { + serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64)) + if err != nil { + return nil, err + } + if len(cfg.CommonName) == 0 { + return nil, errors.New("must specify a CommonName") + } + if len(cfg.Usages) == 0 { + return nil, errors.New("must specify at least one ExtKeyUsage") + } + + certTmpl := x509.Certificate{ + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + SerialNumber: serial, + NotBefore: caCert.NotBefore, + NotAfter: time.Now().Add(duration365d).UTC(), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: cfg.Usages, + } + certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey) + if err != nil { + return nil, err + } + return x509.ParseCertificate(certDERBytes) +} + +// MakeEllipticPrivateKeyPEM creates an ECDSA private key +func MakeEllipticPrivateKeyPEM() ([]byte, error) { + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + if err != nil { + return nil, err + } + + derBytes, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return nil, err + } + + privateKeyPemBlock := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(privateKeyPemBlock), nil +} + +// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host. 
+// Host may be an IP or a DNS name +// You may also specify additional subject alt names (either ip or dns names) for the certificate +func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) { + priv, err := rsa.GenerateKey(cryptorand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + template := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()), + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * 365), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: true, + } + + if ip := net.ParseIP(host); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, host) + } + + template.IPAddresses = append(template.IPAddresses, alternateIPs...) + template.DNSNames = append(template.DNSNames, alternateDNS...) + + derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return nil, nil, err + } + + // Generate cert + certBuffer := bytes.Buffer{} + if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil { + return nil, nil, err + } + + // Generate key + keyBuffer := bytes.Buffer{} + if err := pem.Encode(&keyBuffer, &pem.Block{Type: RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + return nil, nil, err + } + + return certBuffer.Bytes(), keyBuffer.Bytes(), nil +} + +// FormatBytesCert receives byte array certificate and formats in human-readable format +func FormatBytesCert(cert []byte) (string, error) { + block, _ := pem.Decode(cert) + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return "", fmt.Errorf("failed to parse certificate [%v]", err) + } + return FormatCert(c), nil +} + +// FormatCert receives certificate and formats in human-readable format +func FormatCert(c *x509.Certificate) string { + var ips []string + for _, ip := range c.IPAddresses { + ips = append(ips, ip.String()) + } + altNames := append(ips, c.DNSNames...) + res := fmt.Sprintf( + "Issuer: CN=%s | Subject: CN=%s | CA: %t\n", + c.Issuer.CommonName, c.Subject.CommonName, c.IsCA, + ) + res += fmt.Sprintf("Not before: %s Not After: %s", c.NotBefore, c.NotAfter) + if len(altNames) > 0 { + res += fmt.Sprintf("\nAlternate Names: %v", altNames) + } + return res +} diff --git a/vendor/k8s.io/client-go/util/cert/csr.go b/vendor/k8s.io/client-go/util/cert/csr.go new file mode 100644 index 000000000..39a6751f7 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/csr.go @@ -0,0 +1,75 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cert + +import ( + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "net" +) + +// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs. +// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.) +func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) { + template := &x509.CertificateRequest{ + Subject: *subject, + DNSNames: dnsSANs, + IPAddresses: ipSANs, + } + + return MakeCSRFromTemplate(privateKey, template) +} + +// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private +// key and certificate request as a template. All key types that are +// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey +// and *ecdsa.PrivateKey.) +func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) { + t := *template + t.SignatureAlgorithm = sigType(privateKey) + + csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey) + if err != nil { + return nil, err + } + + csrPemBlock := &pem.Block{ + Type: CertificateRequestBlockType, + Bytes: csrDER, + } + + return pem.EncodeToMemory(csrPemBlock), nil +} + +func sigType(privateKey interface{}) x509.SignatureAlgorithm { + // Customize the signature for RSA keys, depending on the key size + if privateKey, ok := privateKey.(*rsa.PrivateKey); ok { + keySize := privateKey.N.BitLen() + switch { + case keySize >= 4096: + return x509.SHA512WithRSA + case keySize >= 3072: + return x509.SHA384WithRSA + default: + return x509.SHA256WithRSA + } + } + return x509.UnknownSignatureAlgorithm +} diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go new file mode 100644 index 000000000..b6b690383 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -0,0 +1,150 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// CanReadCertAndKey returns true if the certificate and key files already exists, +// otherwise returns false. If lost one of cert and key, returns error. +func CanReadCertAndKey(certPath, keyPath string) (bool, error) { + certReadable := canReadFile(certPath) + keyReadable := canReadFile(keyPath) + + if certReadable == false && keyReadable == false { + return false, nil + } + + if certReadable == false { + return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath) + } + + if keyReadable == false { + return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath) + } + + return true, nil +} + +// If the file represented by path exists and +// readable, returns true otherwise returns false. 
+func canReadFile(path string) bool { + f, err := os.Open(path) + if err != nil { + return false + } + + defer f.Close() + + return true +} + +// WriteCert writes the pem-encoded certificate data to certPath. +// The certificate file will be created with file mode 0644. +// If the certificate file already exists, it will be overwritten. +// The parent directory of the certPath will be created as needed with file mode 0755. +func WriteCert(certPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { + return err + } + if err := ioutil.WriteFile(certPath, data, os.FileMode(0644)); err != nil { + return err + } + return nil +} + +// WriteKey writes the pem-encoded key data to keyPath. +// The key file will be created with file mode 0600. +// If the key file already exists, it will be overwritten. +// The parent directory of the keyPath will be created as needed with file mode 0755. +func WriteKey(keyPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { + return err + } + if err := ioutil.WriteFile(keyPath, data, os.FileMode(0600)); err != nil { + return err + } + return nil +} + +// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it +// can't find one, it will generate a new key and store it there. +func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { + loadedData, err := ioutil.ReadFile(keyPath) + if err == nil { + return loadedData, false, err + } + if !os.IsNotExist(err) { + return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) + } + + generatedData, err := MakeEllipticPrivateKeyPEM() + if err != nil { + return nil, false, fmt.Errorf("error generating key: %v", err) + } + if err := WriteKey(keyPath, generatedData); err != nil { + return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) + } + return generatedData, true, nil +} + +// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. +// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func NewPool(filename string) (*x509.CertPool, error) { + certs, err := CertsFromFile(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} + +// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file. +// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func CertsFromFile(file string) ([]*x509.Certificate, error) { + pemBlock, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + certs, err := ParseCertsPEM(pemBlock) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", file, err) + } + return certs, nil +} + +// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. +// Returns an error if the file could not be read or if the private key could not be parsed. 
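+//
+// For example (a sketch; the path is illustrative):
+//
+//	key, err := cert.PrivateKeyFromFile("/etc/kubernetes/pki/apiserver.key")
+//	if err != nil {
+//		return err
+//	}
+//	rsaKey, ok := key.(*rsa.PrivateKey) // or *ecdsa.PrivateKey for EC keys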
+func PrivateKeyFromFile(file string) (interface{}, error) { + pemBlock, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + key, err := ParsePrivateKeyPEM(pemBlock) + if err != nil { + return nil, fmt.Errorf("error reading %s: %v", file, err) + } + return key, nil +} diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go new file mode 100644 index 000000000..899845857 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/pem.go @@ -0,0 +1,138 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" +) + +const ( + // ECPrivateKeyBlockType is a possible value for pem.Block.Type. + ECPrivateKeyBlockType = "EC PRIVATE KEY" + // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. + RSAPrivateKeyBlockType = "RSA PRIVATE KEY" + // CertificateBlockType is a possible value for pem.Block.Type. + CertificateBlockType = "CERTIFICATE" + // CertificateRequestBlockType is a possible value for pem.Block.Type. + CertificateRequestBlockType = "CERTIFICATE REQUEST" + // PrivateKeyBlockType is a possible value for pem.Block.Type. + PrivateKeyBlockType = "PRIVATE KEY" + // PublicKeyBlockType is a possible value for pem.Block.Type. + PublicKeyBlockType = "PUBLIC KEY" +) + +// EncodePublicKeyPEM returns PEM-encoded public key data +func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) { + der, err := x509.MarshalPKIXPublicKey(key) + if err != nil { + return []byte{}, err + } + block := pem.Block{ + Type: PublicKeyBlockType, + Bytes: der, + } + return pem.EncodeToMemory(&block), nil +} + +// EncodePrivateKeyPEM returns PEM-encoded private key data +func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte { + block := pem.Block{ + Type: RSAPrivateKeyBlockType, + Bytes: x509.MarshalPKCS1PrivateKey(key), + } + return pem.EncodeToMemory(&block) +} + +// EncodeCertPEM returns PEM-encoded certificate data +func EncodeCertPEM(cert *x509.Certificate) []byte { + block := pem.Block{ + Type: CertificateBlockType, + Bytes: cert.Raw, + } + return pem.EncodeToMemory(&block) +} + +// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
+// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" +func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { + var privateKeyPemBlock *pem.Block + for { + privateKeyPemBlock, keyData = pem.Decode(keyData) + if privateKeyPemBlock == nil { + break + } + + switch privateKeyPemBlock.Type { + case ECPrivateKeyBlockType: + // ECDSA Private Key in ASN.1 format + if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case RSAPrivateKeyBlockType: + // RSA Private Key in PKCS#1 format + if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case PrivateKeyBlockType: + // RSA or ECDSA Private Key in unencrypted PKCS#8 format + if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + } + + // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks + // originally, only the first PEM block was parsed and expected to be a key block + } + + // we read all the PEM blocks and didn't recognize one + return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") +} + +// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array +// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates +func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + // Only use PEM "CERTIFICATE" blocks without extra headers + if block.Type != CertificateBlockType || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("could not read any certificates") + } + return certs, nil +} diff --git a/vendor/k8s.io/client-go/util/exec/exec.go b/vendor/k8s.io/client-go/util/exec/exec.go new file mode 100644 index 000000000..d170badb6 --- /dev/null +++ b/vendor/k8s.io/client-go/util/exec/exec.go @@ -0,0 +1,52 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +// ExitError is an interface that presents an API similar to os.ProcessState, which is +// what ExitError from os/exec is. This is designed to make testing a bit easier and +// probably loses some of the cross-platform properties of the underlying library. +type ExitError interface { + String() string + Error() string + Exited() bool + ExitStatus() int +} + +// CodeExitError is an implementation of ExitError consisting of an error object +// and an exit code (the upper bits of os.exec.ExitStatus). 
+type CodeExitError struct { + Err error + Code int +} + +var _ ExitError = CodeExitError{} + +func (e CodeExitError) Error() string { + return e.Err.Error() +} + +func (e CodeExitError) String() string { + return e.Err.Error() +} + +func (e CodeExitError) Exited() bool { + return true +} + +func (e CodeExitError) ExitStatus() int { + return e.Code +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go new file mode 100644 index 000000000..71d442a62 --- /dev/null +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/client-go/util/integer" +) + +type backoffEntry struct { + backoff time.Duration + lastUpdate time.Time +} + +type Backoff struct { + sync.Mutex + Clock clock.Clock + defaultDuration time.Duration + maxDuration time.Duration + perItemBackoff map[string]*backoffEntry +} + +func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff { + return &Backoff{ + perItemBackoff: map[string]*backoffEntry{}, + Clock: tc, + defaultDuration: initial, + maxDuration: max, + } +} + +func NewBackOff(initial, max time.Duration) *Backoff { + return &Backoff{ + perItemBackoff: map[string]*backoffEntry{}, + Clock: clock.RealClock{}, + defaultDuration: initial, + maxDuration: max, + } +} + +// Get the current backoff Duration +func (p *Backoff) Get(id string) time.Duration { + p.Lock() + defer p.Unlock() + var delay time.Duration + entry, ok := p.perItemBackoff[id] + if ok { + delay = entry.backoff + } + return delay +} + +// move backoff to the next mark, capping at maxDuration +func (p *Backoff) Next(id string, eventTime time.Time) { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + entry = p.initEntryUnsafe(id) + } else { + delay := entry.backoff * 2 // exponential + entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) + } + entry.lastUpdate = p.Clock.Now() +} + +// Reset forces clearing of all backoff data for a given key. +func (p *Backoff) Reset(id string) { + p.Lock() + defer p.Unlock() + delete(p.perItemBackoff, id) +} + +// Returns True if the elapsed time since eventTime is smaller than the current backoff window +func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok { + return false + } + if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + return false + } + return p.Clock.Now().Sub(eventTime) < entry.backoff +} + +// Returns True if time since lastupdate is less than the current backoff window. 
+func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok { + return false + } + if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + return false + } + return eventTime.Sub(entry.lastUpdate) < entry.backoff +} + +// Garbage collect records that have aged past maxDuration. Backoff users are expected +// to invoke this periodically. +func (p *Backoff) GC() { + p.Lock() + defer p.Unlock() + now := p.Clock.Now() + for id, entry := range p.perItemBackoff { + if now.Sub(entry.lastUpdate) > p.maxDuration*2 { + // GC when entry has not been updated for 2*maxDuration + delete(p.perItemBackoff, id) + } + } +} + +func (p *Backoff) DeleteEntry(id string) { + p.Lock() + defer p.Unlock() + delete(p.perItemBackoff, id) +} + +// Take a lock on *Backoff, before calling initEntryUnsafe +func (p *Backoff) initEntryUnsafe(id string) *backoffEntry { + entry := &backoffEntry{backoff: p.defaultDuration} + p.perItemBackoff[id] = entry + return entry +} + +// After 2*maxDuration we restart the backoff factor to the beginning +func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { + return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go new file mode 100644 index 000000000..c45169c40 --- /dev/null +++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go @@ -0,0 +1,148 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "sync" + + "github.com/juju/ratelimit" +) + +type RateLimiter interface { + // TryAccept returns true if a token is taken immediately. Otherwise, + // it returns false. + TryAccept() bool + // Accept returns once a token becomes available. + Accept() + // Stop stops the rate limiter, subsequent calls to CanAccept will return false + Stop() + // Saturation returns a percentage number which describes how saturated + // this rate limiter is. + // Usually we use token bucket rate limiter. In that case, + // 1.0 means no tokens are available; 0.0 means we have a full bucket of tokens to use. + Saturation() float64 + // QPS returns QPS of this rate limiter + QPS() float32 +} + +type tokenBucketRateLimiter struct { + limiter *ratelimit.Bucket + qps float32 +} + +// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach. +// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a +// smoothed qps rate of 'qps'. +// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'. +// The maximum number of tokens in the bucket is capped at 'burst'. 
+func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter { + limiter := ratelimit.NewBucketWithRate(float64(qps), int64(burst)) + return newTokenBucketRateLimiter(limiter, qps) +} + +// An injectable, mockable clock interface. +type Clock interface { + ratelimit.Clock +} + +// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter +// but allows an injectable clock, for testing. +func NewTokenBucketRateLimiterWithClock(qps float32, burst int, clock Clock) RateLimiter { + limiter := ratelimit.NewBucketWithRateAndClock(float64(qps), int64(burst), clock) + return newTokenBucketRateLimiter(limiter, qps) +} + +func newTokenBucketRateLimiter(limiter *ratelimit.Bucket, qps float32) RateLimiter { + return &tokenBucketRateLimiter{ + limiter: limiter, + qps: qps, + } +} + +func (t *tokenBucketRateLimiter) TryAccept() bool { + return t.limiter.TakeAvailable(1) == 1 +} + +func (t *tokenBucketRateLimiter) Saturation() float64 { + capacity := t.limiter.Capacity() + avail := t.limiter.Available() + return float64(capacity-avail) / float64(capacity) +} + +// Accept will block until a token becomes available +func (t *tokenBucketRateLimiter) Accept() { + t.limiter.Wait(1) +} + +func (t *tokenBucketRateLimiter) Stop() { +} + +func (t *tokenBucketRateLimiter) QPS() float32 { + return t.qps +} + +type fakeAlwaysRateLimiter struct{} + +func NewFakeAlwaysRateLimiter() RateLimiter { + return &fakeAlwaysRateLimiter{} +} + +func (t *fakeAlwaysRateLimiter) TryAccept() bool { + return true +} + +func (t *fakeAlwaysRateLimiter) Saturation() float64 { + return 0 +} + +func (t *fakeAlwaysRateLimiter) Stop() {} + +func (t *fakeAlwaysRateLimiter) Accept() {} + +func (t *fakeAlwaysRateLimiter) QPS() float32 { + return 1 +} + +type fakeNeverRateLimiter struct { + wg sync.WaitGroup +} + +func NewFakeNeverRateLimiter() RateLimiter { + rl := fakeNeverRateLimiter{} + rl.wg.Add(1) + return &rl +} + +func (t *fakeNeverRateLimiter) TryAccept() bool { + return false +} + +func (t *fakeNeverRateLimiter) Saturation() float64 { + return 1 +} + +func (t *fakeNeverRateLimiter) Stop() { + t.wg.Done() +} + +func (t *fakeNeverRateLimiter) Accept() { + t.wg.Wait() +} + +func (t *fakeNeverRateLimiter) QPS() float32 { + return 1 +} diff --git a/vendor/k8s.io/client-go/util/integer/integer.go b/vendor/k8s.io/client-go/util/integer/integer.go new file mode 100644 index 000000000..c6ea106f9 --- /dev/null +++ b/vendor/k8s.io/client-go/util/integer/integer.go @@ -0,0 +1,67 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integer + +func IntMax(a, b int) int { + if b > a { + return b + } + return a +} + +func IntMin(a, b int) int { + if b < a { + return b + } + return a +} + +func Int32Max(a, b int32) int32 { + if b > a { + return b + } + return a +} + +func Int32Min(a, b int32) int32 { + if b < a { + return b + } + return a +} + +func Int64Max(a, b int64) int64 { + if b > a { + return b + } + return a +} + +func Int64Min(a, b int64) int64 { + if b < a { + return b + } + return a +} + +// RoundToInt32 rounds floats into integer numbers. +func RoundToInt32(a float64) int32 { + if a < 0 { + return int32(a - 0.5) + } + return int32(a + 0.5) +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go new file mode 100644 index 000000000..e1080a34a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file should be consistent with pkg/api/v1/annotation_key_constants.go. + +package api + +const ( + // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy + // webhook backend fails. + ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" + + // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation + PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" + + // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods + MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" + + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. + TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" + + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. + SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // CreatedByAnnotation represents the key used to store the spec(json) + // used to create the resource. + CreatedByAnnotation = "kubernetes.io/created-by" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. 
The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by + // the kubelet. Pods with other sysctls will fail to launch. + SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" + + // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly + // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use + // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet + // will fail to launch. + UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" + + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. + // TODO: remove when alpha support for affinity is removed + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" + + // annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" + + kubectlPrefix = "kubectl.kubernetes.io/" + + // LastAppliedConfigAnnotation is the annotation used to store the previous + // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. + LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" + + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. + AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // AnnotationValueExternalTrafficLocal Value of annotation to specify local endpoints behavior. + AnnotationValueExternalTrafficLocal = "OnlyLocal" + // AnnotationValueExternalTrafficGlobal Value of annotation to specify global (legacy) behavior. + AnnotationValueExternalTrafficGlobal = "Global" + + // TODO: The beta annotations have been deprecated, remove them when we release k8s 1.8. + + // BetaAnnotationHealthCheckNodePort Annotation specifying the healthcheck nodePort for the service. + // If not specified, annotation is created by the service api backend with the allocated nodePort. + // Will use user-specified nodePort value if specified by the client. + BetaAnnotationHealthCheckNodePort = "service.beta.kubernetes.io/healthcheck-nodeport" + + // BetaAnnotationExternalTraffic An annotation that denotes if this Service desires to route + // external traffic to local endpoints only. This preserves Source IP and avoids a second hop. 
+ BetaAnnotationExternalTraffic = "service.beta.kubernetes.io/external-traffic" +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/doc.go b/vendor/k8s.io/kubernetes/pkg/api/doc.go new file mode 100644 index 000000000..283a83e40 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register + +// Package api contains the latest (or "internal") version of the +// Kubernetes API objects. This is the API objects as represented in memory. +// The contract presented to clients is located in the versioned packages, +// which are sub-directories. The first one is "v1". Those packages +// describe how a particular version is serialized to storage/network. +package api // import "k8s.io/kubernetes/pkg/api" diff --git a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go b/vendor/k8s.io/kubernetes/pkg/api/field_constants.go new file mode 100644 index 000000000..5ead0f13f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/field_constants.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +// Field path constants that are specific to the internal API +// representation. +const ( + NodeUnschedulableField = "spec.unschedulable" + ObjectNameField = "metadata.name" + PodHostField = "spec.nodeName" + PodStatusField = "status.phase" + SecretTypeField = "type" + + EventReasonField = "reason" + EventSourceField = "source" + EventTypeField = "type" + EventInvolvedKindField = "involvedObject.kind" + EventInvolvedNamespaceField = "involvedObject.namespace" + EventInvolvedNameField = "involvedObject.name" + EventInvolvedUIDField = "involvedObject.uid" + EventInvolvedAPIVersionField = "involvedObject.apiVersion" + EventInvolvedResourceVersionField = "involvedObject.resourceVersion" + EventInvolvedFieldPathField = "involvedObject.fieldPath" +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/json.go b/vendor/k8s.io/kubernetes/pkg/api/json.go new file mode 100644 index 000000000..3a6e04c18 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/json.go @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import "encoding/json" + +// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations +// to prevent anyone from marshaling these internal structs. + +var _ = json.Marshaler(&AvoidPods{}) +var _ = json.Unmarshaler(&AvoidPods{}) + +func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } +func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go b/vendor/k8s.io/kubernetes/pkg/api/objectreference.go new file mode 100644 index 000000000..b36ecab27 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/objectreference.go @@ -0,0 +1,34 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package api + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/api/register.go b/vendor/k8s.io/kubernetes/pkg/api/register.go new file mode 100644 index 000000000..5f261bdfb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/register.go @@ -0,0 +1,123 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package api + +import ( + "os" + + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) +var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) + +// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global +// API registry. +var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) + +// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. +// NOTE: If you are copying this file to start a new api group, STOP! Copy the +// extensions group instead. This Scheme is special and should appear ONLY in +// the api group, unless you really know what you're doing. +// TODO(lavalamp): make the above error impossible. +var Scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme +var Codecs = serializer.NewCodecFactory(Scheme) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(Scheme) + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationControllerList{}, + &ReplicationController{}, + &ServiceList{}, + &Service{}, + &ServiceProxyOptions{}, + &NodeList{}, + &Node{}, + &NodeProxyOptions{}, + &Endpoints{}, + &EndpointsList{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &Secret{}, + &SecretList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + ) + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource.go b/vendor/k8s.io/kubernetes/pkg/api/resource.go new file mode 100644 index 000000000..27b7fd665 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/resource.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes 
Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. +func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) StorageOverlay() *resource.Quantity { + if val, ok := (*self)[ResourceStorageOverlay]; ok { + return &val + } + return &resource.Quantity{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/taint.go b/vendor/k8s.io/kubernetes/pkg/api/taint.go new file mode 100644 index 000000000..173e4e601 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/taint.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package api + +import "fmt" + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/toleration.go b/vendor/k8s.io/kubernetes/pkg/api/toleration.go new file mode 100644 index 000000000..edb73b74e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/toleration.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package api + +// MatchToleration checks if the toleration matches tolerationToMatch. Two tolerations are +// regarded as matching when they have the same key, operator, value, and effect. +// TODO: uniqueness check for tolerations in api validations. +func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.go b/vendor/k8s.io/kubernetes/pkg/api/types.go new file mode 100644 index 000000000..233f828f0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/types.go @@ -0,0 +1,3989 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// Common string formats +// --------------------- +// Many fields in this API have formatting requirements. The commonly used +// formats are defined here. +// +// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" +// in the C language. This is captured by the following regex: +// [A-Za-z_][A-Za-z0-9_]* +// This defines the format, but not the length restriction, which should be +// specified at the definition of any field of this type. +// +// DNS_LABEL: This is a string, no more than 63 characters long, that conforms +// to the definition of a "label" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])? +// +// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms +// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// or more simply: +// DNS_LABEL(\.DNS_LABEL)* +// +// IANA_SVC_NAME: This is a string, no more than 15 characters long, that +// conforms to the definition of IANA service name in RFC 6335. +// It must contain at least one letter [a-z] and it must contain only [a-z0-9-]. +// Hyphens ('-') cannot be the leading or trailing character of the string +// and cannot be adjacent to other hyphens.
+ +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +type ObjectMeta struct { + // Name is unique within a namespace. Name is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // +optional + Name string + + // GenerateName indicates that the name should be made unique by the server prior to persisting + // it. A non-empty value for the field indicates the name will be made unique (and the name + // returned to the client will be different than the name passed). The value of this field will + // be combined with a unique suffix on the server if the Name field has not been provided. + // The provided value must be valid within the rules for Name, and may be truncated by the length + // of the suffix required to make the value unique on the server. + // + // If this field is specified, and Name is not present, the server will NOT return a 409 if the + // generated name exists - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // +optional + GenerateName string + + // Namespace defines the space within which name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // +optional + Namespace string + + // SelfLink is a URL representing this object. + // +optional + SelfLink string + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // +optional + UID types.UID + + // An opaque value that represents the version of this resource. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and values may only be valid for a particular + // resource or set of resources. Only servers will generate resource versions. + // +optional + ResourceVersion string + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // +optional + CreationTimestamp metav1.Time + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. 
Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *metav1.Time + + // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion + // was requested. Represents the most recent grace period, and may only be shortened once set. + // +optional + DeletionGracePeriodSeconds *int64 + + // Labels are key value pairs that may be used to scope and select individual resources. + // Label keys are of the form: + // label-key ::= prefixed-name | name + // prefixed-name ::= prefix '/' name + // prefix ::= DNS_SUBDOMAIN + // name ::= DNS_LABEL + // The prefix is optional. If the prefix is not specified, the key is assumed to be private + // to the user. Other system components that wish to use labels must specify a prefix. The + // "kubernetes.io/" prefix is reserved for use by kubernetes components. + // +optional + Labels map[string]string + + // Annotations are unstructured key value data stored with a resource that may be set by + // external tooling. They are not queryable and should be preserved when modifying + // objects. Annotation keys have the same formatting restrictions as Label keys. See the + // comments on Labels for details. + // +optional + Annotations map[string]string + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + OwnerReferences []metav1.OwnerReference + + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + Initializers *metav1.Initializers + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. 
+ // +optional + Finalizers []string + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault string = "/dev/termination-log" +) + +// Volume represents a named volume in a pod that may be accessed by any containers in the pod. +type Volume struct { + // Required: This must be a DNS_LABEL. Each volume in a pod must have + // a unique name. + Name string + // The VolumeSource represents the location and type of a volume to mount. + // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + // +optional + VolumeSource +} + +// VolumeSource represents the source location of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents file or directory on the host machine that is + // directly exposed to the container. This is generally used for system + // agents or other privileged things that are allowed to see the host + // machine. Most containers will NOT need this. + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // +optional + EmptyDir *EmptyDirVolumeSource + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // GitRepo represents a git repository at a particular revision. + // +optional + GitRepo *GitRepoVolumeSource + // Secret represents a secret that should populate this volume. + // +optional + Secret *SecretVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. 
+ // +optional + ISCSI *ISCSIVolumeSource + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + // +optional + Glusterfs *GlusterfsVolumeSource + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + + // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + + // DownwardAPI represents metadata about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // +optional + StorageOS *StorageOSVolumeSource +} + +// Similar to VolumeSource but meant for the administrator who creates PVs. +// Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. 
+ // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // +optional + HostPath *HostPathVolumeSource + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + // +optional + Glusterfs *GlusterfsVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + // ISCSIVolumeSource represents an ISCSI resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIVolumeSource + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource + // Local represents directly-attached storage with node affinity + // +optional + Local *LocalVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // +optional + StorageOS *StorageOSPersistentVolumeSource +} + +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume + ClaimName string + // Optional: Defaults to false (read/write). ReadOnly here + // will force the ReadOnly setting in VolumeMounts + // +optional + ReadOnly bool +} + +const ( + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. 
+ // It's currently still used and will be held for backwards compatibility + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" + + // MountOptionAnnotation defines mount option annotation used in PVs + MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" + + // AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume. + // Value is a string of the json representation of type NodeAffinity + AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity" +) + +// +genclient=true +// +nonNamespaced=true + +type PersistentVolume struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + //Spec defines a persistent volume owned by the cluster + // +optional + Spec PersistentVolumeSpec + + // Status represents the current information about persistent volume. + // +optional + Status PersistentVolumeStatus +} + +type PersistentVolumeSpec struct { + // Resources represents the actual resources of the volume + Capacity ResourceList + // Source represents the location and type of a volume to mount. + PersistentVolumeSource + // AccessModes contains all ways the volume can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // ClaimRef is expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is + // ignored, i.e. labels of this PV do not need to match PVC selector. + // +optional + ClaimRef *ObjectReference + // Optional: what happens to a persistent volume when released from its claim. + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes +type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim + // +optional + Phase PersistentVolumePhase + // A human-readable message indicating details about why the volume is in this state. 
+ // +optional + Message string + // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI + // +optional + Reason string +} + +type PersistentVolumeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolume +} + +// +genclient=true + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the volume requested by a pod author + // +optional + Spec PersistentVolumeClaimSpec + + // Status represents the current information about a claim + // +optional + Status PersistentVolumeClaimStatus +} + +type PersistentVolumeClaimList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolumeClaim +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // Contains the types of access modes required + // +optional + AccessModes []PersistentVolumeAccessMode + // A label query over volumes to consider for binding. This selector is + // ignored when VolumeName is set + // +optional + Selector *metav1.LabelSelector + // Resources represents the minimum resources required + // +optional + Resources ResourceRequirements + // VolumeName is the binding reference to the PersistentVolume backing this + // claim. When set to non-empty value Selector is not evaluated + // +optional + VolumeName string + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 + // +optional + StorageClassName *string +} + +type PersistentVolumeClaimStatus struct { + // Phase represents the current phase of PersistentVolumeClaim + // +optional + Phase PersistentVolumeClaimPhase + // AccessModes contains all ways the volume backing the PVC can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // Represents the actual resources of the underlying volume + // +optional + Capacity ResourceList +} + +type PersistentVolumeAccessMode string + +const ( + // can be mounted read/write mode to exactly 1 host + ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" + // can be mounted in read-only mode to many hosts + ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" + // can be mounted in read/write mode to many hosts + ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" +) + +type PersistentVolumePhase string + +const ( + // used for PersistentVolumes that are not available + VolumePending PersistentVolumePhase = "Pending" + // used for PersistentVolumes that are not yet bound + // Available volumes are held by the binder and matched to PersistentVolumeClaims + VolumeAvailable PersistentVolumePhase = "Available" + // used for PersistentVolumes that are bound + VolumeBound PersistentVolumePhase = "Bound" + // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted + // released volumes must be recycled before becoming available again + // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource + VolumeReleased PersistentVolumePhase = "Released" + // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim + VolumeFailed PersistentVolumePhase = "Failed" +) + +type PersistentVolumeClaimPhase string + +const ( + // 
used for PersistentVolumeClaims that are not yet bound + ClaimPending PersistentVolumeClaimPhase = "Pending" + // used for PersistentVolumeClaims that are bound + ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +type HostPathVolumeSource struct { + Path string +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // TODO: Longer term we want to represent the selection of underlying + // media more like a scheduling problem - user says what traits they + // need, we give them a backing store that satisfies that. For now + // this will cover the most common needs. + // Optional: what type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // +optional + Medium StorageMedium + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + SizeLimit resource.Quantity +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource. Used to identify the disk in GCE + PDName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. 
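+// A minimal illustrative sketch (the portal, IQN, and filesystem values are assumptions):
+//
+//    iscsi := &ISCSIVolumeSource{
+//        TargetPortal: "10.0.0.10:3260",
+//        IQN:          "iqn.2017-07.com.example:storage",
+//        Lun:          0,
+//        FSType:       "ext4",
+//    }
+//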
+// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIVolumeSource struct { + // Required: iSCSI target portal + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + TargetPortal string + // Required: target iSCSI Qualified Name + // +optional + IQN string + // Required: iSCSI target lun number + // +optional + Lun int32 + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: list of iSCSI target portal ips for high availability. + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + Portals []string + // Optional: whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool + // Optional: whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool + // Optional: CHAP secret for iSCSI target and initiator authentication. + // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true + // +optional + SecretRef *LocalObjectReference +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +type FCVolumeSource struct { + // Required: FC target worldwide names (WWNs) + TargetWWNs []string + // Required: FC target lun number + Lun *int32 + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +type FlexVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: Extra driver options if any. + // +optional + Options map[string]string +} + +// Represents a Persistent Disk resource in AWS. 
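+// A minimal illustrative sketch (the volume ID is a made-up placeholder):
+//
+//    ebs := &AWSElasticBlockStoreVolumeSource{
+//        VolumeID: "vol-0123456789abcdef0",
+//        FSType:   "ext4",
+//    }
+//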
+// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique id of the persistent disk resource. Used to identify the disk in AWS + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +type GitRepoVolumeSource struct { + // Repository URL + Repository string + // Commit hash, this is optional + // +optional + Revision string + // Clone target, this is optional + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string + // TODO: Consider credentials here. +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. + // +optional + SecretName string + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. 
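+//
+// An illustrative projection (the secret name is an assumption):
+//
+//    projected := ProjectedVolumeSource{
+//        Sources: []VolumeProjection{
+//            {Secret: &SecretProjection{
+//                LocalObjectReference: LocalObjectReference{Name: "tls-certs"},
+//            }},
+//        },
+//    }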
+type SecretProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server + Server string + + // Path is the exported NFS share + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the NFS export to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +type QuobyteVolumeSource struct { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + Registry string + + // Volume is a string that references an already created Quobyte volume by name. + Volume string + + // Defaults to false (read/write). ReadOnly here will force + // the Quobyte to be mounted with read-only permissions + // +optional + ReadOnly bool + + // User to map volume access to + // Defaults to the root user + // +optional + User string + + // Group to map volume access to + // Default is no group + // +optional + Group string +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string + + // Required: Path is the Glusterfs volume path + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the Glusterfs to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // Required: CephMonitors is a collection of Ceph monitors + CephMonitors []string + // Required: RBDImage is the rados image name + RBDImage string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: RadosPool is the rados pool name,default is rbd + // +optional + RBDPool string + // Optional: RBDUser is the rados user name, default is admin + // +optional + RadosUser string + // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring + // +optional + Keyring string + // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a cinder volume resource in Openstack. A Cinder volume +// must exist before mounting to a container. The volume must also be +// in the same region as the kubelet. Cinder volumes support ownership +// management and SELinux relabeling. +type CinderVolumeSource struct { + // Unique id of the volume used to identify the cinder volume + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + Monitors []string + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string + // Optional: User is the rados user name, default is admin + // +optional + User string + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // +optional + SecretFile string + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. +type FlockerVolumeSource struct { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + DatasetName string + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + DatasetUUID string +} + +// Represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +type DownwardAPIVolumeSource struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. 
+ // +optional + DefaultMode *int32 +} + +// Represents a single file containing information from the downward API +type DownwardAPIVolumeFile struct { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + Path string + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + FieldRef *ObjectFieldSelector + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +type AzureFileVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string + // Share Name + ShareName string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a vSphere volume resource. +type VsphereVirtualDiskVolumeSource struct { + // Path that identifies vSphere volume vmdk + VolumePath string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Storage Policy Based Management (SPBM) profile name. + // +optional + StoragePolicyName string + // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + // +optional + StoragePolicyID string +} + +// Represents a Photon Controller persistent disk resource. +type PhotonPersistentDiskVolumeSource struct { + // ID that identifies Photon Controller persistent disk + PdID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + FSType string +} + +// PortworxVolumeSource represents a Portworx volume resource. +type PortworxVolumeSource struct { + // VolumeID uniquely identifies a Portworx volume + VolumeID string + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. 
+ // +optional
+ ReadOnly bool
+}
+
+type AzureDataDiskCachingMode string
+type AzureDataDiskKind string
+
+const (
+ AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
+ AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
+ AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
+
+ AzureSharedBlobDisk AzureDataDiskKind = "Shared"
+ AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
+ AzureManagedDisk AzureDataDiskKind = "Managed"
+)
+
+// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+type AzureDiskVolumeSource struct {
+ // The Name of the data disk in the blob storage
+ DiskName string
+ // The URI of the data disk in the blob storage
+ DataDiskURI string
+ // Host Caching mode: None, Read Only, Read Write.
+ // +optional
+ CachingMode *AzureDataDiskCachingMode
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType *string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly *bool
+ // Expected values: Shared (multiple blob disks per storage account), Dedicated (single blob disk per storage account), Managed (Azure managed data disk, only in managed availability set). Defaults to Shared.
+ Kind *AzureDataDiskKind
+}
+
+// ScaleIOVolumeSource represents a persistent ScaleIO volume
+type ScaleIOVolumeSource struct {
+ // The host address of the ScaleIO API Gateway.
+ Gateway string
+ // The name of the storage system as configured in ScaleIO.
+ System string
+ // SecretRef references the secret for ScaleIO user and other
+ // sensitive information. If this is not provided, the login operation will fail.
+ SecretRef *LocalObjectReference
+ // Flag to enable/disable SSL communication with Gateway, default false
+ // +optional
+ SSLEnabled bool
+ // The name of the Protection Domain for the configured storage (defaults to "default").
+ // +optional
+ ProtectionDomain string
+ // The Storage Pool associated with the protection domain (defaults to "default").
+ // +optional
+ StoragePool string
+ // Indicates whether the storage for a volume should be thick or thin (defaults to "thin").
+ // +optional
+ StorageMode string
+ // The name of a volume already created in the ScaleIO system
+ // that is associated with this volume source.
+ VolumeName string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a StorageOS persistent volume resource.
+type StorageOSVolumeSource struct {
+ // VolumeName is the human-readable name of the StorageOS volume. Volume
+ // names are only unique within a namespace.
+ VolumeName string
+ // VolumeNamespace specifies the scope of the volume within StorageOS. If no
+ // namespace is specified then the Pod's namespace will be used. This allows the
+ // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ // Set VolumeName to any name to override the default behaviour.
+ // Set to "default" if you are not using namespaces within StorageOS.
+ // Namespaces that do not pre-exist within StorageOS will be created.
+ // +optional + VolumeNamespace string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *LocalObjectReference +} + +// Represents a StorageOS persistent volume resource. +type StorageOSPersistentVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *ObjectReference +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool +} + +// Adapts a ConfigMap into a projected volume. 
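+//
+// An illustrative sketch (the ConfigMap name and key mapping are assumptions):
+//
+//    cm := ConfigMapProjection{
+//        LocalObjectReference: LocalObjectReference{Name: "app-config"},
+//        Items:                []KeyToPath{{Key: "config.yaml", Path: "config.yaml"}},
+//    }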
+// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection + // information about the configMap data to project + ConfigMap *ConfigMapProjection +} + +// Maps a string key to a path within a volume. +type KeyToPath struct { + // The key to project. + Key string + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. + Path string + // Optional: mode bits to use on this file, should be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 +} + +// Local represents directly-attached storage with node affinity +type LocalVolumeSource struct { + // The full path to the volume on the node + // For alpha, this path must be a directory + // Once block as a source is supported, then this path can point to a block device + Path string +} + +// ContainerPort represents a network port in a single container +type ContainerPort struct { + // Optional: If specified, this must be an IANA_SVC_NAME Each named port + // in a pod must have a unique name. + // +optional + Name string + // Optional: If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // +optional + HostPort int32 + // Required: This must be a valid port number, 0 < x < 65536. + ContainerPort int32 + // Required: Supports "TCP" and "UDP". + // +optional + Protocol Protocol + // Optional: What host IP to bind the external port to. 
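+ // An illustrative port declaration (name and number are assumptions):
+ //
+ //    port := ContainerPort{Name: "http", ContainerPort: 8080, Protocol: ProtocolTCP}
+ //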
+ // +optional + HostIP string +} + +// VolumeMount describes a mounting of a Volume within a container. +type VolumeMount struct { + // Required: This must match the Name of a Volume [above]. + Name string + // Optional: Defaults to false (read-write). + // +optional + ReadOnly bool + // Required. Must not contain ':'. + MountPath string + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + SubPath string +} + +// EnvVar represents an environment variable present in a Container. +type EnvVar struct { + // Required: This must be a C_IDENTIFIER. + Name string + // Optional: no more than one of the following may be specified. + // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // +optional + Value string + // Optional: Specifies a source the value of this var should come from. + // +optional + ValueFrom *EnvVarSource +} + +// EnvVarSource represents a source for the value of an EnvVar. +// Only one of its fields may be set. +type EnvVarSource struct { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. + // +optional + FieldRef *ObjectFieldSelector + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector + // Selects a key of a ConfigMap. + // +optional + ConfigMapKeyRef *ConfigMapKeySelector + // Selects a key of a secret in the pod's namespace. + // +optional + SecretKeyRef *SecretKeySelector +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +type ObjectFieldSelector struct { + // Required: Version of the schema the FieldPath is written in terms of. + // If no value is specified, it will be defaulted to the APIVersion of the + // enclosing object. + APIVersion string + // Required: Path of the field to select in the specified API version + FieldPath string +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + // +optional + ContainerName string + // Required: resource to select + Resource string + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + Divisor resource.Quantity +} + +// Selects a key from a ConfigMap. +type ConfigMapKeySelector struct { + // The ConfigMap to select from. + LocalObjectReference + // The key to select. + Key string + // Specify whether the ConfigMap or it's key must be defined + // +optional + Optional *bool +} + +// SecretKeySelector selects a key of a Secret. +type SecretKeySelector struct { + // The name of the secret in the pod's namespace to select from. + LocalObjectReference + // The key of the secret to select from. Must be a valid secret key. 
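+ // An illustrative selector (the secret name and key are assumptions):
+ //
+ //    ref := &SecretKeySelector{
+ //        LocalObjectReference: LocalObjectReference{Name: "db-credentials"},
+ //        Key:                  "password",
+ //    }
+ //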
+ Key string + // Specify whether the Secret or it's key must be defined + // +optional + Optional *bool +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + Prefix string + // The ConfigMap to select from. + //+optional + ConfigMapRef *ConfigMapEnvSource + // The Secret to select from. + //+optional + SecretRef *SecretEnvSource +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference + // Specify whether the Secret must be defined + // +optional + Optional *bool +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string + // The header field value + Value string +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Optional: Path to access on the HTTP server. + // +optional + Path string + // Required: Name or number of the port to access on the container. + // +optional + Port intstr.IntOrString + // Optional: Host name to connect to, defaults to the pod IP. You + // probably want to set "Host" in httpHeaders instead. + // +optional + Host string + // Optional: Scheme to use for connecting to the host, defaults to HTTP. + // +optional + Scheme URIScheme + // Optional: Custom headers to set in the request. HTTP allows repeated headers. + // +optional + HTTPHeaders []HTTPHeader +} + +// URIScheme identifies the scheme used for connection to a host for Get actions +type URIScheme string + +const ( + // URISchemeHTTP means that the scheme used will be http:// + URISchemeHTTP URIScheme = "HTTP" + // URISchemeHTTPS means that the scheme used will be https:// + URISchemeHTTPS URIScheme = "HTTPS" +) + +// TCPSocketAction describes an action based on opening a socket +type TCPSocketAction struct { + // Required: Port to connect to. + // +optional + Port intstr.IntOrString + // Optional: Host name to connect to, defaults to the pod IP. + // +optional + Host string +} + +// ExecAction describes a "run in container" action. +type ExecAction struct { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // +optional + Command []string +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // The action taken to determine the health of a container + Handler + // Length of time before health checking is activated. In seconds. 
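+ // An illustrative HTTP liveness probe (the path, port, and timings are assumptions):
+ //
+ //    probe := Probe{
+ //        Handler:             Handler{HTTPGet: &HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)}},
+ //        InitialDelaySeconds: 5,
+ //        PeriodSeconds:       10,
+ //    }
+ //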
+ // +optional + InitialDelaySeconds int32 + // Length of time before health checking times out. In seconds. + // +optional + TimeoutSeconds int32 + // How often (in seconds) to perform the probe. + // +optional + PeriodSeconds int32 + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Must be 1 for liveness. + // +optional + SuccessThreshold int32 + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // +optional + FailureThreshold int32 +} + +// PullPolicy describes a policy for if/when to pull a container image +type PullPolicy string + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. + PullAlways PullPolicy = "Always" + // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present + PullNever PullPolicy = "Never" + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. + PullIfNotPresent PullPolicy = "IfNotPresent" +) + +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. + TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. + TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + +// Capability represent POSIX capabilities type +type Capability string + +// Capabilities represent POSIX capabilities that can be added or removed to a running container. +type Capabilities struct { + // Added capabilities + // +optional + Add []Capability + // Removed capabilities + // +optional + Drop []Capability +} + +// ResourceRequirements describes the compute resource requirements. +type ResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // +optional + Limits ResourceList + // Requests describes the minimum amount of compute resources required. + // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value + // +optional + Requests ResourceList +} + +// Container represents a single container that is expected to be run on the host. +type Container struct { + // Required: This must be a DNS_LABEL. Each container in a pod must + // have a unique name. + Name string + // Required. + Image string + // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // +optional + Command []string + // Optional: The docker image's cmd is used if this is not provided; cannot be updated. 
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // +optional + Args []string + // Optional: Defaults to Docker's default. + // +optional + WorkingDir string + // +optional + Ports []ContainerPort + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource + // +optional + Env []EnvVar + // Compute resource requirements. + // +optional + Resources ResourceRequirements + // +optional + VolumeMounts []VolumeMount + // +optional + LivenessProbe *Probe + // +optional + ReadinessProbe *Probe + // +optional + Lifecycle *Lifecycle + // Required. + // +optional + TerminationMessagePath string + // +optional + TerminationMessagePolicy TerminationMessagePolicy + // Required: Policy for pulling images for this container + ImagePullPolicy PullPolicy + // Optional: SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // +optional + SecurityContext *SecurityContext + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + // +optional + Stdin bool + // +optional + StdinOnce bool + // +optional + TTY bool +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + Exec *ExecAction + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction + // TCPSocket specifies an action involving a TCP port. + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +type Lifecycle struct { + // PostStart is called immediately after a container is created. If the handler fails, the container + // is terminated and restarted. + // +optional + PostStart *Handler + // PreStop is called immediately before a container is terminated. The reason for termination is + // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. + // +optional + PreStop *Handler +} + +// The below types are used by kube_client and api_server. + +type ConditionStatus string + +// These are valid condition statuses. 
"ConditionTrue" means a resource is in the condition; +// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +type ContainerStateWaiting struct { + // A brief CamelCase string indicating details about why the container is in waiting state. + // +optional + Reason string + // A human-readable message indicating details about why the container is in waiting state. + // +optional + Message string +} + +type ContainerStateRunning struct { + // +optional + StartedAt metav1.Time +} + +type ContainerStateTerminated struct { + ExitCode int32 + // +optional + Signal int32 + // +optional + Reason string + // +optional + Message string + // +optional + StartedAt metav1.Time + // +optional + FinishedAt metav1.Time + // +optional + ContainerID string +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // +optional + Waiting *ContainerStateWaiting + // +optional + Running *ContainerStateRunning + // +optional + Terminated *ContainerStateTerminated +} + +type ContainerStatus struct { + // Each container in a pod must have a unique name. + Name string + // +optional + State ContainerState + // +optional + LastTerminationState ContainerState + // Ready specifies whether the container has passed its readiness check. + Ready bool + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + RestartCount int32 + Image string + ImageID string + // +optional + ContainerID string +} + +// PodPhase is a label for the condition of a pod at the current time. +type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +type PodConditionType string + +// These are valid conditions of pod. +const ( + // PodScheduled represents status of the scheduling process for this pod. 
+ PodScheduled PodConditionType = "PodScheduled"
+ // PodReady means the pod is able to service requests and should be added to the
+ // load balancing pools of all matching services.
+ PodReady PodConditionType = "Ready"
+ // PodInitialized means that all init containers in the pod have started successfully.
+ PodInitialized PodConditionType = "Initialized"
+ // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
+ // can't schedule the pod right now, for example due to insufficient resources in the cluster.
+ PodReasonUnschedulable = "Unschedulable"
+)
+
+type PodCondition struct {
+ Type PodConditionType
+ Status ConditionStatus
+ // +optional
+ LastProbeTime metav1.Time
+ // +optional
+ LastTransitionTime metav1.Time
+ // +optional
+ Reason string
+ // +optional
+ Message string
+}
+
+// RestartPolicy describes how the container should be restarted.
+// Only one of the following restart policies may be specified.
+// If none of the following policies is specified, the default one
+// is RestartPolicyAlways.
+type RestartPolicy string
+
+const (
+ RestartPolicyAlways RestartPolicy = "Always"
+ RestartPolicyOnFailure RestartPolicy = "OnFailure"
+ RestartPolicyNever RestartPolicy = "Never"
+)
+
+// PodList is a list of Pods.
+type PodList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []Pod
+}
+
+// DNSPolicy defines how a pod's DNS will be configured.
+type DNSPolicy string
+
+const (
+ // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
+ // first, if it is available, then fall back on the default
+ // (as determined by kubelet) DNS settings.
+ DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
+
+ // DNSClusterFirst indicates that the pod should use cluster DNS
+ // first unless hostNetwork is true, if it is available, then
+ // fall back on the default (as determined by kubelet) DNS settings.
+ DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+ // DNSDefault indicates that the pod should use the default (as
+ // determined by kubelet) DNS settings.
+ DNSDefault DNSPolicy = "Default"
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+ // Required. A list of node selector terms. The terms are ORed.
+ NodeSelectorTerms []NodeSelectorTerm
+}
+
+// A null or empty node selector term matches no objects.
+type NodeSelectorTerm struct {
+ // Required. A list of node selector requirements. The requirements are ANDed.
+ MatchExpressions []NodeSelectorRequirement
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+ // The label key that the selector applies to.
+ Key string
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ Operator NodeSelectorOperator
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. If the operator is Gt or Lt, the values
+ // array must have a single element, which will be interpreted as an integer.
+ // This array is replaced during a strategic merge patch.
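+ // An illustrative requirement (the label key and values are assumptions):
+ //
+ //    req := NodeSelectorRequirement{
+ //        Key:      "failure-domain.beta.kubernetes.io/zone",
+ //        Operator: NodeSelectorOpIn,
+ //        Values:   []string{"us-east-1a", "us-east-1b"},
+ //    }
+ //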
+ // +optional + Values []string +} + +// A node selector operator is the set of operators that can be used in +// a node selector requirement. +type NodeSelectorOperator string + +const ( + NodeSelectorOpIn NodeSelectorOperator = "In" + NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" + NodeSelectorOpExists NodeSelectorOperator = "Exists" + NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" + NodeSelectorOpGt NodeSelectorOperator = "Gt" + NodeSelectorOpLt NodeSelectorOperator = "Lt" +) + +// Affinity is a group of affinity scheduling rules. +type Affinity struct { + // Describes node affinity scheduling rules for the pod. + // +optional + NodeAffinity *NodeAffinity + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAffinity *PodAffinity + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAntiAffinity *PodAntiAffinity +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. 
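The OR-of-terms / AND-of-requirements semantics described above compose like this (a sketch; the label keys are illustrative, import path assumed as before):

```go
package example

import api "k8s.io/kubernetes/pkg/api"

// linuxSelector matches nodes that are (linux AND amd64) OR (linux AND arm64):
// the two NodeSelectorTerms are ORed, the requirements inside each term are ANDed.
var linuxSelector = api.NodeSelector{
	NodeSelectorTerms: []api.NodeSelectorTerm{
		{MatchExpressions: []api.NodeSelectorRequirement{
			{Key: "beta.kubernetes.io/os", Operator: api.NodeSelectorOpIn, Values: []string{"linux"}},
			{Key: "beta.kubernetes.io/arch", Operator: api.NodeSelectorOpIn, Values: []string{"amd64"}},
		}},
		{MatchExpressions: []api.NodeSelectorRequirement{
			{Key: "beta.kubernetes.io/os", Operator: api.NodeSelectorOpIn, Values: []string{"linux"}},
			{Key: "beta.kubernetes.io/arch", Operator: api.NodeSelectorOpIn, Values: []string{"arm64"}},
		}},
	},
}
```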
+ // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int32 + // Required. A pod affinity term, associated with the corresponding weight. + PodAffinityTerm PodAffinityTerm +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key matches that of any node on which +// a pod of the set of pods is running. +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + // +optional + LabelSelector *metav1.LabelSelector + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + Namespaces []string + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. 
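A common use of the preferred (soft) form above is spreading replicas across nodes. A hedged sketch; the topology key and labels are illustrative, not mandated by these types:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/kubernetes/pkg/api"
)

// spreadByHost prefers (weight 100) not to co-locate pods carrying the given
// labels on the same node; "kubernetes.io/hostname" is the usual per-node topology key.
func spreadByHost(podLabels map[string]string) *api.PodAntiAffinity {
	return &api.PodAntiAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{{
			Weight: 100,
			PodAffinityTerm: api.PodAffinityTerm{
				LabelSelector: &metav1.LabelSelector{MatchLabels: podLabels},
				TopologyKey:   "kubernetes.io/hostname",
			},
		}},
	}
}
```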
+ // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + // +optional + TopologyKey string +} + +// Node affinity is a group of node affinity scheduling rules. +type NodeAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // will try to eventually evict the pod from its node. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +type PreferredSchedulingTerm struct { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + Weight int32 + // A node selector term, associated with the corresponding weight. + Preference NodeSelectorTerm +} + +// The node this Taint is attached to has the effect "effect" on +// any pod that that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + Key string + // Required. The taint value corresponding to the taint key. + // +optional + Value string + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + Effect TaintEffect + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + TimeAdded metav1.Time +} + +type TaintEffect string + +const ( + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // but allow all pods submitted to Kubelet without going through the scheduler + // to start, and allow all already-running pods to continue running. + // Enforced by the scheduler. 
+ TaintEffectNoSchedule TaintEffect = "NoSchedule" + // Like TaintEffectNoSchedule, but the scheduler tries not to schedule + // new pods onto the node, rather than prohibiting new pods from scheduling + // onto the node entirely. Enforced by the scheduler. + TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to + // Kubelet without going through the scheduler to start. + // Enforced by Kubelet and the scheduler. + // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" + // Evict any already-running pods that do not tolerate the taint. + // Currently enforced by NodeController. + TaintEffectNoExecute TaintEffect = "NoExecute" +) + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple using the matching operator . +type Toleration struct { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. + // +optional + Key string + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + // +optional + Operator TolerationOperator + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + // +optional + Value string + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + Effect TaintEffect + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + TolerationSeconds *int64 +} + +// A toleration operator is the set of operators that can be used in a toleration. +type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + +// PodSpec is a description of a pod +type PodSpec struct { + Volumes []Volume + // List of initialization containers belonging to the pod. + InitContainers []Container + // List of containers belonging to the pod. + Containers []Container + // +optional + RestartPolicy RestartPolicy + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. 
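The key/operator/value/effect matching rules documented on Toleration condense into a small predicate. This is a simplified sketch, not the library's own matcher:

```go
package example

import api "k8s.io/kubernetes/pkg/api"

// tolerates applies the documented rules: an empty Key matches every taint key
// (the operator must then be Exists), Exists ignores Value, and an empty Effect
// matches every effect. Equal is the default operator when none is set.
func tolerates(t api.Toleration, taint api.Taint) bool {
	if t.Effect != "" && t.Effect != taint.Effect {
		return false
	}
	if t.Key != "" && t.Key != taint.Key {
		return false
	}
	switch t.Operator {
	case api.TolerationOpExists:
		return true
	case api.TolerationOpEqual, "":
		return t.Value == taint.Value
	}
	return false
}
```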
+ // +optional + TerminationGracePeriodSeconds *int64 + // Optional duration in seconds relative to the StartTime that the pod may be active on a node + // before the system actively tries to terminate the pod; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 + // Required: Set DNS policy. + // +optional + DNSPolicy DNSPolicy + // NodeSelector is a selector which must be true for the pod to fit on a node + // +optional + NodeSelector map[string]string + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod + // The pod will be allowed to use secrets referenced by the ServiceAccount + ServiceAccountName string + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *PodSecurityContext + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // +optional + ImagePullSecrets []LocalObjectReference + // Specifies the hostname of the Pod. + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + Hostname string + // If specified, the fully qualified Pod hostname will be "...svc.". + // If not specified, the pod will not have a domainname at all. + // +optional + Subdomain string + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string + // If specified, the pod's tolerations. + // +optional + Tolerations []Toleration + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. + // +optional + HostAliases []HostAlias +} + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +type HostAlias struct { + IP string + Hostnames []string +} + +// Sysctl defines a kernel parameter to be set +type Sysctl struct { + // Name of a property to set + Name string + // Value of a property to set + Value string +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +type PodSecurityContext struct { + // Use the host's network namespace. If this option is set, the ports that will be + // used must be specified. + // Optional: Default to false + // +k8s:conversion-gen=false + // +optional + HostNetwork bool + // Use the host's pid namespace. + // Optional: Default to false. 
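Many of the pointer-typed PodSpec fields exist so that "unset" stays distinguishable from an explicit zero. A minimal sketch of a spec; Container is declared earlier in this file, and only its Name and Image are set here:

```go
package example

import api "k8s.io/kubernetes/pkg/api"

func int64Ptr(v int64) *int64 { return &v }

// minimalSpec fills in the PodSpec fields discussed above: one container, the
// default restart policy made explicit, cluster-first DNS, a 30s grace period,
// and a single extra hosts-file entry.
func minimalSpec() api.PodSpec {
	return api.PodSpec{
		Containers:                    []api.Container{{Name: "web", Image: "nginx:1.13"}},
		RestartPolicy:                 api.RestartPolicyAlways,
		DNSPolicy:                     api.DNSClusterFirst,
		TerminationGracePeriodSeconds: int64Ptr(30),
		HostAliases:                   []api.HostAlias{{IP: "127.0.0.1", Hostnames: []string{"smoke-test.local"}}},
	}
}
```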
+ // +k8s:conversion-gen=false + // +optional + HostPID bool + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsUser *types.UnixUserID + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + SupplementalGroups []types.UnixGroupID + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + FSGroup *types.UnixGroupID +} + +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. +type PodStatus struct { + // +optional + Phase PodPhase + // +optional + Conditions []PodCondition + // A human readable message indicating details about why the pod is in this state. + // +optional + Message string + // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk' + // +optional + Reason string + + // +optional + HostIP string + // +optional + PodIP string + + // Date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + StartTime *metav1.Time + // +optional + QOSClass PodQOSClass + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. 
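A sketch of a non-root pod-level security context. It assumes UnixUserID/UnixGroupID resolve to `k8s.io/apimachinery/pkg/types` in this vendored snapshot, which is worth verifying against the tree:

```go
package example

import (
	"k8s.io/apimachinery/pkg/types"
	api "k8s.io/kubernetes/pkg/api"
)

// nonRootContext asks the kubelet to refuse images that would run as UID 0 and
// pins the entrypoint to an explicit UID, plus an fsGroup for volume ownership.
func nonRootContext() *api.PodSecurityContext {
	uid := types.UnixUserID(1000)
	gid := types.UnixGroupID(2000)
	nonRoot := true
	return &api.PodSecurityContext{
		RunAsUser:    &uid,
		RunAsNonRoot: &nonRoot,
		FSGroup:      &gid,
	}
}
```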
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status + InitContainerStatuses []ContainerStatus + // The list has one entry per container in the manifest. Each entry is + // currently the output of `docker inspect`. This output format is *not* + // final and should not be relied upon. + // TODO: Make real decisions about what our info should look like. Re-enable fuzz test + // when we have done this. + // +optional + ContainerStatuses []ContainerStatus +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +type PodStatusResult struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// +genclient=true + +// Pod is a collection of containers, used as either input (create, update) or as output (list, get). +type Pod struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec + + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// PodTemplateSpec describes the data a pod should have when created from a template +type PodTemplateSpec struct { + // Metadata of the pods created from this template. + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec +} + +// +genclient=true + +// PodTemplate describes a template for creating copies of a predefined pod. +type PodTemplate struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Template defines the pods that will be created from this pod template + // +optional + Template PodTemplateSpec +} + +// PodTemplateList is a list of PodTemplates. +type PodTemplateList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodTemplate +} + +// ReplicationControllerSpec is the specification of a replication controller. +// As the internal representation of a replication controller, it may have either +// a TemplateRef or a Template set. +type ReplicationControllerSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the Replicas count. + Selector map[string]string + + // TemplateRef is a reference to an object that describes the pod that will be created if + // insufficient replicas are detected. This reference is ignored if a Template is set. + // Must be set before converting to a versioned API object + // +optional + //TemplateRef *ObjectReference + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Internally, this takes precedence over a + // TemplateRef. + // +optional + Template *PodTemplateSpec +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +type ReplicationControllerStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replication controller. 
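The spec's Selector and the template's labels have to agree, or the controller will not adopt the pods it creates. A sketch of wiring them together (label values illustrative):

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/kubernetes/pkg/api"
)

// threeReplicas wraps an existing PodSpec in a ReplicationControllerSpec whose
// selector matches the labels stamped onto the pod template.
func threeReplicas(spec api.PodSpec) api.ReplicationControllerSpec {
	labels := map[string]string{"app": "web"}
	return api.ReplicationControllerSpec{
		Replicas: 3,
		Selector: labels,
		Template: &api.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels},
			Spec:       spec,
		},
	}
}
```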
+ // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replication controller. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replication controller's current state. + // +optional + Conditions []ReplicationControllerCondition +} + +type ReplicationControllerConditionType string + +// These are valid conditions of a replication controller. +const ( + // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods + // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, + // etc. or deleted due to kubelet being down or finalizers are failing. + ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" +) + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +type ReplicationControllerCondition struct { + // Type of replication controller condition. + Type ReplicationControllerConditionType + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + // +optional + Reason string + // A human readable message indicating details about the transition. + // +optional + Message string +} + +// +genclient=true + +// ReplicationController represents the configuration of a replication controller. +type ReplicationController struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this replication controller. + // +optional + Spec ReplicationControllerSpec + + // Status is the current status of this replication controller. This data may be + // out of date by some window of time. + // +optional + Status ReplicationControllerStatus +} + +// ReplicationControllerList is a collection of replication controllers. +type ReplicationControllerList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicationController +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// ServiceList holds a list of services. +type ServiceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Service +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. + ServiceAffinityNone ServiceAffinity = "None" +) + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the ClusterIP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. 
+ ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// Service External Traffic Policy Type string +type ServiceExternalTrafficPolicyType string + +const ( + // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. + ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + // ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior. + ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" +) + +// ServiceStatus represents the current status of a service +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus +} + +// LoadBalancerStatus represents the status of a load-balancer +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer; + // traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string +} + +// ServiceSpec describes the attributes that a user creates on a service +type ServiceSpec struct { + // Type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + Type ServiceType + + // Required: The list of ports that are exposed by this service. + Ports []ServicePort + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. 
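LoadBalancerIngress carries either an IP or a hostname. A small sketch that flattens the status into printable endpoints:

```go
package example

import api "k8s.io/kubernetes/pkg/api"

// ingressEndpoints lists the reachable ingress points of a load-balanced
// service, preferring the IP form and falling back to the DNS hostname form.
func ingressEndpoints(st api.LoadBalancerStatus) []string {
	var out []string
	for _, ing := range st.Ingress {
		switch {
		case ing.IP != "":
			out = append(out, ing.IP)
		case ing.Hostname != "":
			out = append(out, ing.Hostname)
		}
	}
	return out
}
```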
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + Selector map[string]string + + // ClusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. "None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +optional + ClusterIP string + + // ExternalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid DNS name and requires Type to be ExternalName. + ExternalName string + + // ExternalIPs are used by external load balancers, or can be set by + // users to handle external traffic that arrives at a node. + // +optional + ExternalIPs []string + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. + // +optional + LoadBalancerIP string + + // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. + // +optional + SessionAffinity ServiceAffinity + + // Optional: If specified and supported by the platform, this will restrict traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature." + // +optional + LoadBalancerSourceRanges []string + + // externalTrafficPolicy denotes if this Service desires to route external + // traffic to node-local or cluster-wide endpoints. "Local" preserves the + // client source IP and avoids a second hop for LoadBalancer and Nodeport + // type services, but risks potentially imbalanced traffic spreading. + // "Cluster" obscures the client source IP and may cause a second hop to + // another node, but should have good overall load-spreading. + // +optional + ExternalTrafficPolicy ServiceExternalTrafficPolicyType + + // healthCheckNodePort specifies the healthcheck nodePort for the service. + // If not specified, HealthCheckNodePort is created by the service api + // backend with the allocated nodePort. Will use user-specified nodePort value + // if specified by the client. Only effects when Type is set to LoadBalancer + // and ExternalTrafficPolicy is set to Local. + // +optional + HealthCheckNodePort int32 +} + +type ServicePort struct { + // Optional if only one ServicePort is defined on this service: The + // name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. + Name string + + // The IP protocol for this port. Supports "TCP" and "UDP". + Protocol Protocol + + // The port that will be exposed on the service. + Port int32 + + // Optional: The target port on pods selected by this service. 
If this + // is a string, it will be looked up as a named port in the target + // Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + TargetPort intstr.IntOrString + + // The port on each node on which this service is exposed. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + NodePort int32 +} + +// +genclient=true + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a service. + // +optional + Spec ServiceSpec + + // Status represents the current status of a service. + // +optional + Status ServiceStatus +} + +// +genclient=true + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount + Secrets []ObjectReference + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // +optional + ImagePullSecrets []LocalObjectReference + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool +} + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ServiceAccount +} + +// +genclient=true + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // The set of all endpoints is the union of all subsets. + Subsets []EndpointSubset +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +type EndpointSubset struct { + Addresses []EndpointAddress + NotReadyAddresses []EndpointAddress + Ports []EndpointPort +} + +// EndpointAddress is a tuple that describes single IP address. +type EndpointAddress struct { + // The IP of this endpoint. 
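Putting the service pieces together: a sketch of a NodePort service whose target port is resolved by name against the selected pods. Protocol and its TCP constant are declared earlier in this file; the names and labels are illustrative.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	api "k8s.io/kubernetes/pkg/api"
)

// nodePortService exposes port 80 on every node; NodePort itself is left zero
// so the API server auto-allocates one, as the field comment above notes.
func nodePortService() api.Service {
	return api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: api.ServiceSpec{
			Type:     api.ServiceTypeNodePort,
			Selector: map[string]string{"app": "web"},
			Ports: []api.ServicePort{{
				Name:       "http",
				Protocol:   api.ProtocolTCP,
				Port:       80,
				TargetPort: intstr.FromString("http"),
			}},
		},
	}
}
```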
+ // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, see #4447. + IP string + // Optional: Hostname of this endpoint + // Meant to be used by DNS servers etc. + // +optional + Hostname string + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. + // +optional + NodeName *string + // Optional: The kubernetes object related to the entry point. + TargetRef *ObjectReference +} + +// EndpointPort is a tuple that describes a single port. +type EndpointPort struct { + // The name of this port (corresponds to ServicePort.Name). Optional + // if only one port is defined. Must be a DNS_LABEL. + Name string + + // The port number. + Port int32 + + // The IP protocol for this port. + Protocol Protocol +} + +// EndpointsList is a list of endpoints. +type EndpointsList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Endpoints +} + +// NodeSpec describes the attributes that a node is created with. +type NodeSpec struct { + // PodCIDR represents the pod IP range assigned to the node + // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. + // +optional + PodCIDR string + + // External ID of the node assigned by some machine database (e.g. a cloud provider) + // +optional + ExternalID string + + // ID of the node assigned by the cloud provider + // Note: format is "://" + // +optional + ProviderID string + + // Unschedulable controls node schedulability of new pods. By default node is schedulable. + // +optional + Unschedulable bool + + // If specified, the node's taints. + // +optional + Taints []Taint +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercased for backwards compat (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is prefered. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string + // SystemUUID reported by the node. For unique machine identification + // MachineID is prefered. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string + // Boot ID reported by the node. + BootID string + // Kernel Version reported by the node. + KernelVersion string + // OS Image reported by the node. + OSImage string + // ContainerRuntime Version reported by the node. + ContainerRuntimeVersion string + // Kubelet Version reported by the node. + KubeletVersion string + // KubeProxy Version reported by the node. + KubeProxyVersion string + // The Operating System reported by the node + OperatingSystem string + // The Architecture reported by the node + Architecture string +} + +// NodeStatus is information about the current status of a node. 
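The Cartesian-product expansion described in the EndpointSubset comment, written out as a sketch (ready addresses only):

```go
package example

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/api"
)

// expand lists every "ip:port" pair implied by one EndpointSubset, i.e. the
// Cartesian product of its ready Addresses and its Ports.
func expand(ss api.EndpointSubset) []string {
	var out []string
	for _, addr := range ss.Addresses {
		for _, p := range ss.Ports {
			out = append(out, fmt.Sprintf("%s:%d", addr.IP, p.Port))
		}
	}
	return out
}
```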
+type NodeStatus struct { + // Capacity represents the total resources of a node. + // +optional + Capacity ResourceList + // Allocatable represents the resources of a node that are available for scheduling. + // +optional + Allocatable ResourceList + // NodePhase is the current lifecycle phase of the node. + // +optional + Phase NodePhase + // Conditions is an array of current node conditions. + // +optional + Conditions []NodeCondition + // Queried from cloud provider, if available. + // +optional + Addresses []NodeAddress + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints + // Set of ids/uuids to uniquely identify the node. + // +optional + NodeInfo NodeSystemInfo + // List of container images on this node + // +optional + Images []ContainerImage + // List of attachable volumes in use (mounted) by the node. + // +optional + VolumesInUse []UniqueVolumeName + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName + + // DevicePath represents the device path where the volume should be available + DevicePath string +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time + // (brief) reason why this entry was added to the list. + // +optional + Reason string + // Human readable message indicating why this entry was added to the list. + // +optional + Message string +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + Names []string + // The size of the image in bytes. + // +optional + SizeBytes int64 +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReady, NodeReachable +const ( + // NodeReady means kubelet is healthy and ready to accept pods. 
+ NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" +) + +type NodeCondition struct { + Type NodeConditionType + Status ConditionStatus + // +optional + LastHeartbeatTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +type NodeAddressType string + +const ( + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +type NodeAddress struct { + Type NodeAddressType + Address string +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + // +optional + Capacity ResourceList +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. +const ( + // CPU, in cores. (500m = .5 cores) + ResourceCPU ResourceName = "cpu" + // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceMemory ResourceName = "memory" + // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) + ResourceStorage ResourceName = "storage" + // Local Storage for overlay filesystem, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // The resource name for ResourceStorageOverlay is alpha and it can change across releases. + ResourceStorageOverlay ResourceName = "storage.kubernetes.io/overlay" + // Local Storage for scratch space, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // The resource name for ResourceStorageScratch is alpha and it can change across releases. + ResourceStorageScratch ResourceName = "storage.kubernetes.io/scratch" + // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. + ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" + // Number of Pods that may be running on this Node: see ResourcePods +) + +const ( + // Namespace prefix for opaque counted resources (alpha). + ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" +) + +// ResourceList is a set of (resource name, quantity) pairs. 
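NodeStatus is consumed much like PodStatus: conditions are scanned by condition type and addresses by address type. A sketch of the two most common lookups:

```go
package example

import api "k8s.io/kubernetes/pkg/api"

// nodeIsReady treats the node as healthy only when the Ready condition is
// present with status "True".
func nodeIsReady(status api.NodeStatus) bool {
	for _, c := range status.Conditions {
		if c.Type == api.NodeReady {
			return c.Status == api.ConditionTrue
		}
	}
	return false
}

// internalIP returns the first InternalIP address, or "" when none is reported.
func internalIP(status api.NodeStatus) string {
	for _, a := range status.Addresses {
		if a.Type == api.NodeInternalIP {
			return a.Address
		}
	}
	return ""
}
```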
+type ResourceList map[ResourceName]resource.Quantity + +// +genclient=true +// +nonNamespaced=true + +// Node is a worker node in Kubernetes +// The name of the node according to etcd is in ObjectMeta.Name. +type Node struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a node. + // +optional + Spec NodeSpec + + // Status describes the current status of a Node + // +optional + Status NodeStatus +} + +// NodeList is a list of nodes. +type NodeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Node +} + +// NamespaceSpec describes the attributes on a Namespace +type NamespaceSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []FinalizerName +} + +// FinalizerName is the name identifying a finalizer during namespace lifecycle. +type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. +const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // +optional + Phase NamespacePhase +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +// +genclient=true +// +nonNamespaced=true + +// A namespace provides a scope for Names. +// Use of multiple namespaces is optional +type Namespace struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of the Namespace. + // +optional + Spec NamespaceSpec + + // Status describes the current status of a Namespace + // +optional + Status NamespaceStatus +} + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Namespace +} + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. +type Binding struct { + metav1.TypeMeta + // ObjectMeta describes the object that is being bound. + // +optional + metav1.ObjectMeta + + // Target is the object to bind to. + Target ObjectReference +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID +} + +// DeletionPropagation decides whether and how garbage collection will be performed. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. + // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. + // This policy is cascading, i.e., the dependents will be deleted with Foreground. 
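ResourceList values are resource.Quantity, so the suffix notation used in the constants above applies directly. A sketch using the apimachinery resource package this tree vendors:

```go
package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	api "k8s.io/kubernetes/pkg/api"
)

// smallCapacity expresses half a CPU core and one GiB of memory using the
// quantity notation referenced above ("500m" = 0.5 cores, "1Gi" = 1024^3 bytes).
var smallCapacity = api.ResourceList{
	api.ResourceCPU:    resource.MustParse("500m"),
	api.ResourceMemory: resource.MustParse("1Gi"),
}
```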
+ DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +type DeleteOptions struct { + metav1.TypeMeta + + // Optional duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // +optional + GracePeriodSeconds *int64 + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation +} + +// ListOptions is the query options to a standard REST list call, and has future support for +// watch calls. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +type ListOptions struct { + metav1.TypeMeta + + // A selector based on labels + LabelSelector labels.Selector + // A selector based on fields + FieldSelector fields.Selector + + // If true, partially initialized resources are included in the response. + IncludeUninitialized bool + + // If true, watch for changes to this list + Watch bool + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string + // Timeout for the list/watch call. + TimeoutSeconds *int64 +} + +// PodLogOptions is the query options for a Pod's logs REST call +type PodLogOptions struct { + metav1.TypeMeta + + // Container for which to return logs + Container string + // If true, follow the logs for the pod + Follow bool + // If true, return previous terminated container logs + Previous bool + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. 
+ Timestamps bool + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 +} + +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the attach call + // +optional + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the attach call + // +optional + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the attach call + // +optional + Stderr bool + + // TTY if true indicates that a tty will be allocated for the attach call + // +optional + TTY bool + + // Container to attach to. + // +optional + Container string +} + +// PodExecOptions is the query options to a Pod's remote exec call +type PodExecOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the exec call + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the exec call + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the exec call + Stderr bool + + // TTY if true indicates that a tty will be allocated for the exec call + TTY bool + + // Container in which to execute the command. + Container string + + // Command is the remote command to execute; argv array; not executed within a shell. + Command []string +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +type PodPortForwardOptions struct { + metav1.TypeMeta + + // The list of ports to forward + // +optional + Ports []int32 +} + +// PodProxyOptions is the query options to a Pod's proxy call +type PodProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// NodeProxyOptions is the query options to a Node's proxy call +type NodeProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + Path string +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // +optional + Kind string + // +optional + Namespace string + // +optional + Name string + // +optional + UID types.UID + // +optional + APIVersion string + // +optional + ResourceVersion string + + // Optional. If referring to a piece of an object instead of an entire object, this string + // should contain information to identify the sub-object. 
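The log options combine naturally: follow one container, look back over a bounded window, and cap the tail. A sketch, assuming metav1.NewTime to wrap the time.Time value:

```go
package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/kubernetes/pkg/api"
)

// recentLogs follows one container's log, starting an hour back, tailing at
// most 100 lines, and prefixing each line with an RFC3339 timestamp.
func recentLogs(container string) api.PodLogOptions {
	tail := int64(100)
	since := metav1.NewTime(time.Now().Add(-time.Hour))
	return api.PodLogOptions{
		Container:  container,
		Follow:     true,
		SinceTime:  &since,
		TailLines:  &tail,
		Timestamps: true,
	}
}
```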
For example, if the object + // reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string +} + +// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +type LocalObjectReference struct { + //TODO: Add other useful fields. apiVersion, kind, uid? + Name string +} + +type SerializedReference struct { + metav1.TypeMeta + // +optional + Reference ObjectReference +} + +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string + // Node name on which the event is generated. + // +optional + Host string +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient=true + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Required. The object that this event is about. + // +optional + InvolvedObject ObjectReference + + // Optional; this should be a short, machine understandable string that gives the reason + // for this event being generated. For example, if the event is reporting that a container + // can't start, the Reason might be "ImageNotFound". + // TODO: provide exact specification for format. + // +optional + Reason string + + // Optional. A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + Message string + + // Optional. The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time + + // The number of times this event has occurred. + // +optional + Count int32 + + // Type of this event (Normal, Warning), new types could be added in the future. + // +optional + Type string +} + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Event +} + +// List holds a list of objects, which may not be known by the server. 
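An illustrative sketch of how the Event, EventSource and ObjectReference types above fit together in practice. This is not part of the vendored file; the package alias and every literal value here (names, namespace, reason, message) are assumptions made only for illustration.

    package main

    import (
        "fmt"

        api "k8s.io/kubernetes/pkg/api"
    )

    // exampleEvent builds a Warning event whose InvolvedObject points at a
    // single container inside a pod, using the FieldPath syntax described in
    // the ObjectReference comment above.
    func exampleEvent() api.Event {
        return api.Event{
            InvolvedObject: api.ObjectReference{
                Kind:      "Pod",
                Namespace: "default",
                Name:      "nginx",
                FieldPath: "spec.containers{nginx}", // reference to a sub-object
            },
            Reason:  "ImageNotFound", // short, machine-understandable reason
            Message: "container image could not be pulled",
            Source:  api.EventSource{Component: "kubelet", Host: "node-1"},
            Type:    api.EventTypeWarning,
            Count:   1,
        }
    }

    func main() {
        fmt.Printf("%+v\n", exampleEvent())
    }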
+type List struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []runtime.Object +} + +// A type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind +type LimitRangeItem struct { + // Type of resource that this limit applies to + // +optional + Type LimitType + // Max usage constraints on this kind by resource name + // +optional + Max ResourceList + // Min usage constraints on this kind by resource name + // +optional + Min ResourceList + // Default resource requirement limit value by resource name. + // +optional + Default ResourceList + // DefaultRequest resource requirement request value by resource name. + // +optional + DefaultRequest ResourceList + // MaxLimitRequestRatio represents the max burst value for the named resource + // +optional + MaxLimitRequestRatio ResourceList +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced + Limits []LimitRangeItem +} + +// +genclient=true + +// LimitRange sets resource usage limits for each kind of resource in a Namespace +type LimitRange struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the limits enforced + // +optional + Spec LimitRangeSpec +} + +// LimitRangeList is a list of LimitRange items. +type LimitRangeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of LimitRange objects + Items []LimitRange +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. 
(500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota +type ResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource + // +optional + Hard ResourceList + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. + // +optional + Scopes []ResourceQuotaScope +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource + // +optional + Hard ResourceList + // Used is the current observed total usage of the resource in the namespace + // +optional + Used ResourceList +} + +// +genclient=true + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired quota + // +optional + Spec ResourceQuotaSpec + + // Status defines the actual enforced quota and its current usage + // +optional + Status ResourceQuotaStatus +} + +// ResourceQuotaList is a list of ResourceQuota items +type ResourceQuotaList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of ResourceQuota objects + Items []ResourceQuota +} + +// +genclient=true + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. + // +optional + Data map[string][]byte + + // Used to facilitate programmatic handling of secret data. 
+ // +optional + Type SecretType +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default; arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authetication. 
+ // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. + // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secret. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" +) + +type SecretList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Secret +} + +// +genclient=true + +// ConfigMap holds configuration data for components or applications to consume. +type ConfigMap struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the configuration data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // +optional + Data map[string]string +} + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of ConfigMaps. + Items []ConfigMap +} + +// These constants are for remote command execution and port forwarding and are +// used by both the client side and server side components. +// +// This is probably not the ideal place for them, but it didn't seem worth it +// to create pkg/exec and pkg/portforward just to contain a single file with +// constants in it. Suggestions for more appropriate alternatives are +// definitely welcome! +const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParamm = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. 
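An illustrative sketch of how the shared remote-command-execution constants above could be used when a client builds an exec request. It is not part of the vendored file; the helper names, the parameter values, and the net/url and net/http plumbing are assumptions, only the parameter and header names come from the constants above.

    package main

    import (
        "fmt"
        "net/http"
        "net/url"

        api "k8s.io/kubernetes/pkg/api"
    )

    // buildExecQuery assembles the query parameters for a pod exec request:
    // one "command" entry per argv element plus the requested output streams.
    func buildExecQuery(cmd []string) url.Values {
        v := url.Values{}
        for _, c := range cmd {
            v.Add(api.ExecCommandParamm, c) // constant name is spelled this way upstream
        }
        v.Set(api.ExecStdoutParam, "1")
        v.Set(api.ExecStderrParam, "1")
        return v
    }

    // tagStream labels one stream of the multiplexed connection (here stdout)
    // using the shared streamType header.
    func tagStream(h http.Header) {
        h.Set(api.StreamType, api.StreamTypeStdout)
    }

    func main() {
        fmt.Println(buildExecQuery([]string{"ls", "-l"}).Encode())
    }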
+const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType + Status ConditionStatus + // +optional + Message string + // +optional + Error string +} + +// +genclient=true +// +nonNamespaced=true + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Conditions []ComponentCondition +} + +type ComponentStatusList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ComponentStatus +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *types.UnixUserID + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool + // The read-only root filesystem allows you to restrict the locations that an application can write + // files to, ensuring the persistent data can only be written to mounts. + // +optional + ReadOnlyRootFilesystem *bool +} + +// SELinuxOptions are the labels to be applied to the container. +type SELinuxOptions struct { + // SELinux user label + // +optional + User string + // SELinux role label + // +optional + Role string + // SELinux type label + // +optional + Type string + // SELinux level label. + // +optional + Level string +} + +// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record +// the global allocation state of the cluster. The schema of Range and Data generic, in that Range +// should be a string representation of the inputs to a range (for instance, for IP allocation it +// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a +// binary range. Consumers should use annotations to record additional information (schema version, +// data encoding hints). 
A range allocation should *ALWAYS* be recreatable at any time by observation +// of the cluster, thus the object is less strongly typed than most. +type RangeAllocation struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or + // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define + // a start and end unless there is an implicit end. + Range string + // A byte array representing the serialized state of a range allocation. Additional clarifiers on + // the type or format of data should be represented with annotations. For IP allocations, this is + // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing + // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). + Data []byte +} + +const ( + // "default-scheduler" is the name of default scheduler. + DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int = 1 +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go new file mode 100644 index 000000000..b9b24716e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go @@ -0,0 +1,3776 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package api + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + fields "k8s.io/apimachinery/pkg/fields" + labels "k8s.io/apimachinery/pkg/labels" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
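A minimal sketch of how this registration hook is consumed, included for reviewers rather than as part of the vendored file: the generated deep-copy functions are attached to a runtime.Scheme, either through the package's SchemeBuilder (as in the init above) or directly as below. runtime.NewScheme and the log-based error handling are the only pieces assumed here.

    package main

    import (
        "log"

        "k8s.io/apimachinery/pkg/runtime"
        api "k8s.io/kubernetes/pkg/api"
    )

    func main() {
        // Attach the generated deep-copy functions to a fresh scheme.
        s := runtime.NewScheme()
        if err := api.RegisterDeepCopies(s); err != nil {
            log.Fatalf("registering deep-copy functions: %v", err)
        }
        // In normal use the package-level scheme is populated via the init
        // above, and callers deep-copy objects through the scheme rather than
        // by calling the DeepCopy_api_* helpers directly.
    }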
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AWSElasticBlockStoreVolumeSource, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Affinity, InType: reflect.TypeOf(&Affinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AttachedVolume, InType: reflect.TypeOf(&AttachedVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AvoidPods, InType: reflect.TypeOf(&AvoidPods{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AzureDiskVolumeSource, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AzureFileVolumeSource, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Binding, InType: reflect.TypeOf(&Binding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Capabilities, InType: reflect.TypeOf(&Capabilities{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CephFSVolumeSource, InType: reflect.TypeOf(&CephFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CinderVolumeSource, InType: reflect.TypeOf(&CinderVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentCondition, InType: reflect.TypeOf(&ComponentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Container, InType: reflect.TypeOf(&Container{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerPort, InType: reflect.TypeOf(&ContainerPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerState, InType: reflect.TypeOf(&ContainerState{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateRunning, InType: reflect.TypeOf(&ContainerStateRunning{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateTerminated, InType: reflect.TypeOf(&ContainerStateTerminated{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateWaiting, InType: reflect.TypeOf(&ContainerStateWaiting{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIProjection, 
InType: reflect.TypeOf(&DownwardAPIProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointAddress, InType: reflect.TypeOf(&EndpointAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointPort, InType: reflect.TypeOf(&EndpointPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Event, InType: reflect.TypeOf(&Event{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventList, InType: reflect.TypeOf(&EventList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventSource, InType: reflect.TypeOf(&EventSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GCEPersistentDiskVolumeSource, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GitRepoVolumeSource, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GlusterfsVolumeSource, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HTTPGetAction, InType: reflect.TypeOf(&HTTPGetAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HTTPHeader, InType: reflect.TypeOf(&HTTPHeader{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Handler, InType: reflect.TypeOf(&Handler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HostAlias, InType: reflect.TypeOf(&HostAlias{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HostPathVolumeSource, InType: reflect.TypeOf(&HostPathVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ISCSIVolumeSource, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_KeyToPath, InType: reflect.TypeOf(&KeyToPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Lifecycle, InType: reflect.TypeOf(&Lifecycle{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRange, InType: reflect.TypeOf(&LimitRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeItem, InType: reflect.TypeOf(&LimitRangeItem{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_api_LimitRangeList, InType: reflect.TypeOf(&LimitRangeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeSpec, InType: reflect.TypeOf(&LimitRangeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_List, InType: reflect.TypeOf(&List{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LoadBalancerIngress, InType: reflect.TypeOf(&LoadBalancerIngress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LoadBalancerStatus, InType: reflect.TypeOf(&LoadBalancerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LocalObjectReference, InType: reflect.TypeOf(&LocalObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LocalVolumeSource, InType: reflect.TypeOf(&LocalVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NFSVolumeSource, InType: reflect.TypeOf(&NFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Namespace, InType: reflect.TypeOf(&Namespace{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceList, InType: reflect.TypeOf(&NamespaceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceSpec, InType: reflect.TypeOf(&NamespaceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceStatus, InType: reflect.TypeOf(&NamespaceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Node, InType: reflect.TypeOf(&Node{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeAddress, InType: reflect.TypeOf(&NodeAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeAffinity, InType: reflect.TypeOf(&NodeAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeCondition, InType: reflect.TypeOf(&NodeCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeDaemonEndpoints, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeList, InType: reflect.TypeOf(&NodeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeProxyOptions, InType: reflect.TypeOf(&NodeProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeResources, InType: reflect.TypeOf(&NodeResources{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelector, InType: reflect.TypeOf(&NodeSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelectorRequirement, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelectorTerm, InType: reflect.TypeOf(&NodeSelectorTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSpec, InType: reflect.TypeOf(&NodeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeStatus, InType: reflect.TypeOf(&NodeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSystemInfo, InType: reflect.TypeOf(&NodeSystemInfo{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimList, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimSpec, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimStatus, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimVolumeSource, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeList, InType: reflect.TypeOf(&PersistentVolumeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Pod, InType: reflect.TypeOf(&Pod{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAntiAffinity, InType: reflect.TypeOf(&PodAntiAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAttachOptions, InType: reflect.TypeOf(&PodAttachOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodCondition, InType: reflect.TypeOf(&PodCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodList, InType: reflect.TypeOf(&PodList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSpec, InType: reflect.TypeOf(&PodSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodStatus, InType: reflect.TypeOf(&PodStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodStatusResult, InType: reflect.TypeOf(&PodStatusResult{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferAvoidPodsEntry, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Probe, InType: reflect.TypeOf(&Probe{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationController, InType: reflect.TypeOf(&ReplicationController{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerCondition, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerList, InType: reflect.TypeOf(&ReplicationControllerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerSpec, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerStatus, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceFieldSelector, InType: reflect.TypeOf(&ResourceFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuota, InType: reflect.TypeOf(&ResourceQuota{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaList, InType: reflect.TypeOf(&ResourceQuotaList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaSpec, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Secret, InType: reflect.TypeOf(&Secret{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretList, InType: reflect.TypeOf(&SecretList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Service, InType: reflect.TypeOf(&Service{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceAccount, InType: 
reflect.TypeOf(&ServiceAccount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceAccountList, InType: reflect.TypeOf(&ServiceAccountList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceList, InType: reflect.TypeOf(&ServiceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServicePort, InType: reflect.TypeOf(&ServicePort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceProxyOptions, InType: reflect.TypeOf(&ServiceProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceSpec, InType: reflect.TypeOf(&ServiceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceStatus, InType: reflect.TypeOf(&ServiceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_StorageOSPersistentVolumeSource, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_StorageOSVolumeSource, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Sysctl, InType: reflect.TypeOf(&Sysctl{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_TCPSocketAction, InType: reflect.TypeOf(&TCPSocketAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Taint, InType: reflect.TypeOf(&Taint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Toleration, InType: reflect.TypeOf(&Toleration{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Volume, InType: reflect.TypeOf(&Volume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, + ) +} + +// DeepCopy_api_AWSElasticBlockStoreVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_AWSElasticBlockStoreVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AWSElasticBlockStoreVolumeSource) + out := out.(*AWSElasticBlockStoreVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_Affinity is an autogenerated deepcopy function. +func DeepCopy_api_Affinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Affinity) + out := out.(*Affinity) + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + if err := DeepCopy_api_NodeAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_AttachedVolume is an autogenerated deepcopy function. 
+func DeepCopy_api_AttachedVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AttachedVolume) + out := out.(*AttachedVolume) + *out = *in + return nil + } +} + +// DeepCopy_api_AvoidPods is an autogenerated deepcopy function. +func DeepCopy_api_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AvoidPods) + out := out.(*AvoidPods) + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + if err := DeepCopy_api_PreferAvoidPodsEntry(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_AzureDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_AzureDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureDiskVolumeSource) + out := out.(*AzureDiskVolumeSource) + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(AzureDataDiskKind) + **out = **in + } + return nil + } +} + +// DeepCopy_api_AzureFileVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_AzureFileVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureFileVolumeSource) + out := out.(*AzureFileVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_Binding is an autogenerated deepcopy function. +func DeepCopy_api_Binding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Binding) + out := out.(*Binding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + return nil + } +} + +// DeepCopy_api_Capabilities is an autogenerated deepcopy function. +func DeepCopy_api_Capabilities(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Capabilities) + out := out.(*Capabilities) + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_CephFSVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_CephFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CephFSVolumeSource) + out := out.(*CephFSVolumeSource) + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_CinderVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_CinderVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CinderVolumeSource) + out := out.(*CinderVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_ComponentCondition is an autogenerated deepcopy function. 
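One more illustrative sketch, not part of the vendored file: the generated helpers can also be called directly. DeepCopy_api_Capabilities above never dereferences the Cloner, so nil is acceptable for this particular call; the capability names are arbitrary examples.

    package main

    import (
        "fmt"

        api "k8s.io/kubernetes/pkg/api"
    )

    func main() {
        in := &api.Capabilities{Add: []api.Capability{"NET_ADMIN"}}
        out := &api.Capabilities{}
        if err := api.DeepCopy_api_Capabilities(in, out, nil); err != nil {
            panic(err)
        }
        // The Add slice was re-allocated by the copier, so mutating the copy
        // leaves the original untouched.
        out.Add[0] = "SYS_TIME"
        fmt.Println(in.Add[0]) // prints NET_ADMIN
    }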
+func DeepCopy_api_ComponentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentCondition) + out := out.(*ComponentCondition) + *out = *in + return nil + } +} + +// DeepCopy_api_ComponentStatus is an autogenerated deepcopy function. +func DeepCopy_api_ComponentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatus) + out := out.(*ComponentStatus) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ComponentStatusList is an autogenerated deepcopy function. +func DeepCopy_api_ComponentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatusList) + out := out.(*ComponentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ComponentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ConfigMap is an autogenerated deepcopy function. +func DeepCopy_api_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMap) + out := out.(*ConfigMap) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +// DeepCopy_api_ConfigMapEnvSource is an autogenerated deepcopy function. +func DeepCopy_api_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapEnvSource) + out := out.(*ConfigMapEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_ConfigMapKeySelector is an autogenerated deepcopy function. +func DeepCopy_api_ConfigMapKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapKeySelector) + out := out.(*ConfigMapKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_ConfigMapList is an autogenerated deepcopy function. +func DeepCopy_api_ConfigMapList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapList) + out := out.(*ConfigMapList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + if err := DeepCopy_api_ConfigMap(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ConfigMapProjection is an autogenerated deepcopy function. 
+func DeepCopy_api_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapProjection) + out := out.(*ConfigMapProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_ConfigMapVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_ConfigMapVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapVolumeSource) + out := out.(*ConfigMapVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_Container is an autogenerated deepcopy function. +func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Container) + out := out.(*Container) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + if err := DeepCopy_api_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + if err := DeepCopy_api_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + if err := DeepCopy_api_Probe(*in, *out, c); err != nil { + return err + } + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + if err := DeepCopy_api_Probe(*in, *out, c); err != nil { + return err + } + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + if err := DeepCopy_api_Lifecycle(*in, *out, c); err != nil { + return err + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + if err := DeepCopy_api_SecurityContext(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_ContainerImage is an autogenerated deepcopy function. 
+func DeepCopy_api_ContainerImage(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerImage) + out := out.(*ContainerImage) + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ContainerPort is an autogenerated deepcopy function. +func DeepCopy_api_ContainerPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerPort) + out := out.(*ContainerPort) + *out = *in + return nil + } +} + +// DeepCopy_api_ContainerState is an autogenerated deepcopy function. +func DeepCopy_api_ContainerState(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerState) + out := out.(*ContainerState) + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + if err := DeepCopy_api_ContainerStateRunning(*in, *out, c); err != nil { + return err + } + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + if err := DeepCopy_api_ContainerStateTerminated(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_ContainerStateRunning is an autogenerated deepcopy function. +func DeepCopy_api_ContainerStateRunning(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateRunning) + out := out.(*ContainerStateRunning) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + return nil + } +} + +// DeepCopy_api_ContainerStateTerminated is an autogenerated deepcopy function. +func DeepCopy_api_ContainerStateTerminated(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateTerminated) + out := out.(*ContainerStateTerminated) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + out.FinishedAt = in.FinishedAt.DeepCopy() + return nil + } +} + +// DeepCopy_api_ContainerStateWaiting is an autogenerated deepcopy function. +func DeepCopy_api_ContainerStateWaiting(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateWaiting) + out := out.(*ContainerStateWaiting) + *out = *in + return nil + } +} + +// DeepCopy_api_ContainerStatus is an autogenerated deepcopy function. +func DeepCopy_api_ContainerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStatus) + out := out.(*ContainerStatus) + *out = *in + if err := DeepCopy_api_ContainerState(&in.State, &out.State, c); err != nil { + return err + } + if err := DeepCopy_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_DaemonEndpoint is an autogenerated deepcopy function. +func DeepCopy_api_DaemonEndpoint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonEndpoint) + out := out.(*DaemonEndpoint) + *out = *in + return nil + } +} + +// DeepCopy_api_DeleteOptions is an autogenerated deepcopy function. 
+func DeepCopy_api_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeleteOptions) + out := out.(*DeleteOptions) + *out = *in + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(Preconditions) + if err := DeepCopy_api_Preconditions(*in, *out, c); err != nil { + return err + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +// DeepCopy_api_DownwardAPIProjection is an autogenerated deepcopy function. +func DeepCopy_api_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIProjection) + out := out.(*DownwardAPIProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_DownwardAPIVolumeFile is an autogenerated deepcopy function. +func DeepCopy_api_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeFile) + out := out.(*DownwardAPIVolumeFile) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_api_DownwardAPIVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_DownwardAPIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeSource) + out := out.(*DownwardAPIVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_api_EmptyDirVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_EmptyDirVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EmptyDirVolumeSource) + out := out.(*EmptyDirVolumeSource) + *out = *in + out.SizeLimit = in.SizeLimit.DeepCopy() + return nil + } +} + +// DeepCopy_api_EndpointAddress is an autogenerated deepcopy function. 
+func DeepCopy_api_EndpointAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointAddress) + out := out.(*EndpointAddress) + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_EndpointPort is an autogenerated deepcopy function. +func DeepCopy_api_EndpointPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointPort) + out := out.(*EndpointPort) + *out = *in + return nil + } +} + +// DeepCopy_api_EndpointSubset is an autogenerated deepcopy function. +func DeepCopy_api_EndpointSubset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointSubset) + out := out.(*EndpointSubset) + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_Endpoints is an autogenerated deepcopy function. +func DeepCopy_api_Endpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Endpoints) + out := out.(*Endpoints) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointSubset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_EndpointsList is an autogenerated deepcopy function. +func DeepCopy_api_EndpointsList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointsList) + out := out.(*EndpointsList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + if err := DeepCopy_api_Endpoints(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_EnvFromSource is an autogenerated deepcopy function. +func DeepCopy_api_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvFromSource) + out := out.(*EnvFromSource) + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + if err := DeepCopy_api_ConfigMapEnvSource(*in, *out, c); err != nil { + return err + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + if err := DeepCopy_api_SecretEnvSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_EnvVar is an autogenerated deepcopy function. 
+func DeepCopy_api_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVar) + out := out.(*EnvVar) + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + if err := DeepCopy_api_EnvVarSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_EnvVarSource is an autogenerated deepcopy function. +func DeepCopy_api_EnvVarSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVarSource) + out := out.(*EnvVarSource) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + if err := DeepCopy_api_ConfigMapKeySelector(*in, *out, c); err != nil { + return err + } + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + if err := DeepCopy_api_SecretKeySelector(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_Event is an autogenerated deepcopy function. +func DeepCopy_api_Event(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Event) + out := out.(*Event) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + out.FirstTimestamp = in.FirstTimestamp.DeepCopy() + out.LastTimestamp = in.LastTimestamp.DeepCopy() + return nil + } +} + +// DeepCopy_api_EventList is an autogenerated deepcopy function. +func DeepCopy_api_EventList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventList) + out := out.(*EventList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + if err := DeepCopy_api_Event(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_EventSource is an autogenerated deepcopy function. +func DeepCopy_api_EventSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventSource) + out := out.(*EventSource) + *out = *in + return nil + } +} + +// DeepCopy_api_ExecAction is an autogenerated deepcopy function. +func DeepCopy_api_ExecAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExecAction) + out := out.(*ExecAction) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_FCVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_FCVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FCVolumeSource) + out := out.(*FCVolumeSource) + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_api_FlexVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_api_FlexVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlexVolumeSource) + out := out.(*FlexVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +// DeepCopy_api_FlockerVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_FlockerVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlockerVolumeSource) + out := out.(*FlockerVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_GCEPersistentDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GCEPersistentDiskVolumeSource) + out := out.(*GCEPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_GitRepoVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_GitRepoVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GitRepoVolumeSource) + out := out.(*GitRepoVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_GlusterfsVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_GlusterfsVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GlusterfsVolumeSource) + out := out.(*GlusterfsVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_HTTPGetAction is an autogenerated deepcopy function. +func DeepCopy_api_HTTPGetAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPGetAction) + out := out.(*HTTPGetAction) + *out = *in + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_HTTPHeader is an autogenerated deepcopy function. +func DeepCopy_api_HTTPHeader(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPHeader) + out := out.(*HTTPHeader) + *out = *in + return nil + } +} + +// DeepCopy_api_Handler is an autogenerated deepcopy function. +func DeepCopy_api_Handler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Handler) + out := out.(*Handler) + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + if err := DeepCopy_api_ExecAction(*in, *out, c); err != nil { + return err + } + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + if err := DeepCopy_api_HTTPGetAction(*in, *out, c); err != nil { + return err + } + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return nil + } +} + +// DeepCopy_api_HostAlias is an autogenerated deepcopy function. +func DeepCopy_api_HostAlias(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostAlias) + out := out.(*HostAlias) + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_HostPathVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_api_HostPathVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPathVolumeSource) + out := out.(*HostPathVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_ISCSIVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_ISCSIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ISCSIVolumeSource) + out := out.(*ISCSIVolumeSource) + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_KeyToPath is an autogenerated deepcopy function. +func DeepCopy_api_KeyToPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*KeyToPath) + out := out.(*KeyToPath) + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_api_Lifecycle is an autogenerated deepcopy function. +func DeepCopy_api_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Lifecycle) + out := out.(*Lifecycle) + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + if err := DeepCopy_api_Handler(*in, *out, c); err != nil { + return err + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + if err := DeepCopy_api_Handler(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_LimitRange is an autogenerated deepcopy function. +func DeepCopy_api_LimitRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRange) + out := out.(*LimitRange) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_LimitRangeItem is an autogenerated deepcopy function. +func DeepCopy_api_LimitRangeItem(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeItem) + out := out.(*LimitRangeItem) + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_api_LimitRangeList is an autogenerated deepcopy function. 
+func DeepCopy_api_LimitRangeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeList) + out := out.(*LimitRangeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + if err := DeepCopy_api_LimitRange(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_LimitRangeSpec is an autogenerated deepcopy function. +func DeepCopy_api_LimitRangeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeSpec) + out := out.(*LimitRangeSpec) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + if err := DeepCopy_api_LimitRangeItem(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_List is an autogenerated deepcopy function. +func DeepCopy_api_List(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*List) + out := out.(*List) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*runtime.Object) + } + } + } + return nil + } +} + +// DeepCopy_api_ListOptions is an autogenerated deepcopy function. +func DeepCopy_api_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListOptions) + out := out.(*ListOptions) + *out = *in + // in.LabelSelector is kind 'Interface' + if in.LabelSelector != nil { + if newVal, err := c.DeepCopy(&in.LabelSelector); err != nil { + return err + } else { + out.LabelSelector = *newVal.(*labels.Selector) + } + } + // in.FieldSelector is kind 'Interface' + if in.FieldSelector != nil { + if newVal, err := c.DeepCopy(&in.FieldSelector); err != nil { + return err + } else { + out.FieldSelector = *newVal.(*fields.Selector) + } + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_api_LoadBalancerIngress is an autogenerated deepcopy function. +func DeepCopy_api_LoadBalancerIngress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerIngress) + out := out.(*LoadBalancerIngress) + *out = *in + return nil + } +} + +// DeepCopy_api_LoadBalancerStatus is an autogenerated deepcopy function. +func DeepCopy_api_LoadBalancerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerStatus) + out := out.(*LoadBalancerStatus) + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_LocalObjectReference is an autogenerated deepcopy function. +func DeepCopy_api_LocalObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalObjectReference) + out := out.(*LocalObjectReference) + *out = *in + return nil + } +} + +// DeepCopy_api_LocalVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_api_LocalVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalVolumeSource) + out := out.(*LocalVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_NFSVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_NFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NFSVolumeSource) + out := out.(*NFSVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_Namespace is an autogenerated deepcopy function. +func DeepCopy_api_Namespace(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Namespace) + out := out.(*Namespace) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_NamespaceList is an autogenerated deepcopy function. +func DeepCopy_api_NamespaceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceList) + out := out.(*NamespaceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + if err := DeepCopy_api_Namespace(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NamespaceSpec is an autogenerated deepcopy function. +func DeepCopy_api_NamespaceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceSpec) + out := out.(*NamespaceSpec) + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_NamespaceStatus is an autogenerated deepcopy function. +func DeepCopy_api_NamespaceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceStatus) + out := out.(*NamespaceStatus) + *out = *in + return nil + } +} + +// DeepCopy_api_Node is an autogenerated deepcopy function. +func DeepCopy_api_Node(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Node) + out := out.(*Node) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_NodeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_NodeStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_NodeAddress is an autogenerated deepcopy function. +func DeepCopy_api_NodeAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAddress) + out := out.(*NodeAddress) + *out = *in + return nil + } +} + +// DeepCopy_api_NodeAffinity is an autogenerated deepcopy function. 
+func DeepCopy_api_NodeAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAffinity) + out := out.(*NodeAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + if err := DeepCopy_api_NodeSelector(*in, *out, c); err != nil { + return err + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NodeCondition is an autogenerated deepcopy function. +func DeepCopy_api_NodeCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeCondition) + out := out.(*NodeCondition) + *out = *in + out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_api_NodeDaemonEndpoints is an autogenerated deepcopy function. +func DeepCopy_api_NodeDaemonEndpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeDaemonEndpoints) + out := out.(*NodeDaemonEndpoints) + *out = *in + return nil + } +} + +// DeepCopy_api_NodeList is an autogenerated deepcopy function. +func DeepCopy_api_NodeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeList) + out := out.(*NodeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + if err := DeepCopy_api_Node(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NodeProxyOptions is an autogenerated deepcopy function. +func DeepCopy_api_NodeProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeProxyOptions) + out := out.(*NodeProxyOptions) + *out = *in + return nil + } +} + +// DeepCopy_api_NodeResources is an autogenerated deepcopy function. +func DeepCopy_api_NodeResources(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeResources) + out := out.(*NodeResources) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_api_NodeSelector is an autogenerated deepcopy function. +func DeepCopy_api_NodeSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelector) + out := out.(*NodeSelector) + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeSelectorTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NodeSelectorRequirement is an autogenerated deepcopy function. 
+func DeepCopy_api_NodeSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorRequirement) + out := out.(*NodeSelectorRequirement) + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_NodeSelectorTerm is an autogenerated deepcopy function. +func DeepCopy_api_NodeSelectorTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorTerm) + out := out.(*NodeSelectorTerm) + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeSelectorRequirement(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NodeSpec is an autogenerated deepcopy function. +func DeepCopy_api_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSpec) + out := out.(*NodeSpec) + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + if err := DeepCopy_api_Taint(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_NodeStatus is an autogenerated deepcopy function. +func DeepCopy_api_NodeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeStatus) + out := out.(*NodeStatus) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerImage(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_NodeSystemInfo is an autogenerated deepcopy function. +func DeepCopy_api_NodeSystemInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSystemInfo) + out := out.(*NodeSystemInfo) + *out = *in + return nil + } +} + +// DeepCopy_api_ObjectFieldSelector is an autogenerated deepcopy function. +func DeepCopy_api_ObjectFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectFieldSelector) + out := out.(*ObjectFieldSelector) + *out = *in + return nil + } +} + +// DeepCopy_api_ObjectMeta is an autogenerated deepcopy function. 
+func DeepCopy_api_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMeta) + out := out.(*ObjectMeta) + *out = *in + out.CreationTimestamp = in.CreationTimestamp.DeepCopy() + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]v1.OwnerReference, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*v1.OwnerReference) + } + } + } + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.Initializers) + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ObjectReference is an autogenerated deepcopy function. +func DeepCopy_api_ObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectReference) + out := out.(*ObjectReference) + *out = *in + return nil + } +} + +// DeepCopy_api_PersistentVolume is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolume) + out := out.(*PersistentVolume) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeClaim is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeClaim(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaim) + out := out.(*PersistentVolumeClaim) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeClaimList is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeClaimList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimList) + out := out.(*PersistentVolumeClaimList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := DeepCopy_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeClaimSpec is an autogenerated deepcopy function. 
+func DeepCopy_api_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimSpec) + out := out.(*PersistentVolumeClaimSpec) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeClaimStatus is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeClaimStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimStatus) + out := out.(*PersistentVolumeClaimStatus) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeClaimVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeClaimVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimVolumeSource) + out := out.(*PersistentVolumeClaimVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_PersistentVolumeList is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeList) + out := out.(*PersistentVolumeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := DeepCopy_api_PersistentVolume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSource) + out := out.(*PersistentVolumeSource) + *out = *in + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalVolumeSource) + **out = **in + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(StorageOSPersistentVolumeSource) + if err := DeepCopy_api_StorageOSPersistentVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeSpec is an autogenerated deepcopy function. 
+func DeepCopy_api_PersistentVolumeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSpec) + out := out.(*PersistentVolumeSpec) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if err := DeepCopy_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { + return err + } + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_PersistentVolumeStatus is an autogenerated deepcopy function. +func DeepCopy_api_PersistentVolumeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeStatus) + out := out.(*PersistentVolumeStatus) + *out = *in + return nil + } +} + +// DeepCopy_api_PhotonPersistentDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PhotonPersistentDiskVolumeSource) + out := out.(*PhotonPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_Pod is an autogenerated deepcopy function. +func DeepCopy_api_Pod(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Pod) + out := out.(*Pod) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PodAffinity is an autogenerated deepcopy function. +func DeepCopy_api_PodAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinity) + out := out.(*PodAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodAffinityTerm is an autogenerated deepcopy function. 
+func DeepCopy_api_PodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinityTerm) + out := out.(*PodAffinityTerm) + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_PodAntiAffinity is an autogenerated deepcopy function. +func DeepCopy_api_PodAntiAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAntiAffinity) + out := out.(*PodAntiAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodAttachOptions is an autogenerated deepcopy function. +func DeepCopy_api_PodAttachOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAttachOptions) + out := out.(*PodAttachOptions) + *out = *in + return nil + } +} + +// DeepCopy_api_PodCondition is an autogenerated deepcopy function. +func DeepCopy_api_PodCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodCondition) + out := out.(*PodCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_api_PodExecOptions is an autogenerated deepcopy function. +func DeepCopy_api_PodExecOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodExecOptions) + out := out.(*PodExecOptions) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_PodList is an autogenerated deepcopy function. +func DeepCopy_api_PodList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodList) + out := out.(*PodList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := DeepCopy_api_Pod(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodLogOptions is an autogenerated deepcopy function. 
+func DeepCopy_api_PodLogOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodLogOptions) + out := out.(*PodLogOptions) + *out = *in + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_api_PodPortForwardOptions is an autogenerated deepcopy function. +func DeepCopy_api_PodPortForwardOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPortForwardOptions) + out := out.(*PodPortForwardOptions) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_PodProxyOptions is an autogenerated deepcopy function. +func DeepCopy_api_PodProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodProxyOptions) + out := out.(*PodProxyOptions) + *out = *in + return nil + } +} + +// DeepCopy_api_PodSecurityContext is an autogenerated deepcopy function. +func DeepCopy_api_PodSecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityContext) + out := out.(*PodSecurityContext) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(types.UnixUserID) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]types.UnixGroupID, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + *out = new(types.UnixGroupID) + **out = **in + } + return nil + } +} + +// DeepCopy_api_PodSignature is an autogenerated deepcopy function. +func DeepCopy_api_PodSignature(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSignature) + out := out.(*PodSignature) + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.OwnerReference) + } + } + return nil + } +} + +// DeepCopy_api_PodSpec is an autogenerated deepcopy function. 
+func DeepCopy_api_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSpec) + out := out.(*PodSpec) + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + if err := DeepCopy_api_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_api_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_api_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := DeepCopy_api_PodSecurityContext(*in, *out, c); err != nil { + return err + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(Affinity) + if err := DeepCopy_api_Affinity(*in, *out, c); err != nil { + return err + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + if err := DeepCopy_api_Toleration(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + if err := DeepCopy_api_HostAlias(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodStatus is an autogenerated deepcopy function. 
+func DeepCopy_api_PodStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatus) + out := out.(*PodStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodStatusResult is an autogenerated deepcopy function. +func DeepCopy_api_PodStatusResult(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatusResult) + out := out.(*PodStatusResult) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PodTemplate is an autogenerated deepcopy function. +func DeepCopy_api_PodTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplate) + out := out.(*PodTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PodTemplateList is an autogenerated deepcopy function. +func DeepCopy_api_PodTemplateList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateList) + out := out.(*PodTemplateList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodTemplate(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_PodTemplateSpec is an autogenerated deepcopy function. +func DeepCopy_api_PodTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateSpec) + out := out.(*PodTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_PortworxVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PortworxVolumeSource) + out := out.(*PortworxVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_Preconditions is an autogenerated deepcopy function. 
+func DeepCopy_api_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Preconditions) + out := out.(*Preconditions) + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return nil + } +} + +// DeepCopy_api_PreferAvoidPodsEntry is an autogenerated deepcopy function. +func DeepCopy_api_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferAvoidPodsEntry) + out := out.(*PreferAvoidPodsEntry) + *out = *in + if err := DeepCopy_api_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { + return err + } + out.EvictionTime = in.EvictionTime.DeepCopy() + return nil + } +} + +// DeepCopy_api_PreferredSchedulingTerm is an autogenerated deepcopy function. +func DeepCopy_api_PreferredSchedulingTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferredSchedulingTerm) + out := out.(*PreferredSchedulingTerm) + *out = *in + if err := DeepCopy_api_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_Probe is an autogenerated deepcopy function. +func DeepCopy_api_Probe(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Probe) + out := out.(*Probe) + *out = *in + if err := DeepCopy_api_Handler(&in.Handler, &out.Handler, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_ProjectedVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProjectedVolumeSource) + out := out.(*ProjectedVolumeSource) + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + if err := DeepCopy_api_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +// DeepCopy_api_QuobyteVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_QuobyteVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*QuobyteVolumeSource) + out := out.(*QuobyteVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_RBDVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_RBDVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RBDVolumeSource) + out := out.(*RBDVolumeSource) + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_RangeAllocation is an autogenerated deepcopy function. +func DeepCopy_api_RangeAllocation(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RangeAllocation) + out := out.(*RangeAllocation) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ReplicationController is an autogenerated deepcopy function. 
+func DeepCopy_api_ReplicationController(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationController) + out := out.(*ReplicationController) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ReplicationControllerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_ReplicationControllerCondition is an autogenerated deepcopy function. +func DeepCopy_api_ReplicationControllerCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerCondition) + out := out.(*ReplicationControllerCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +// DeepCopy_api_ReplicationControllerList is an autogenerated deepcopy function. +func DeepCopy_api_ReplicationControllerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerList) + out := out.(*ReplicationControllerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := DeepCopy_api_ReplicationController(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ReplicationControllerSpec is an autogenerated deepcopy function. +func DeepCopy_api_ReplicationControllerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerSpec) + out := out.(*ReplicationControllerSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + if err := DeepCopy_api_PodTemplateSpec(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_ReplicationControllerStatus is an autogenerated deepcopy function. +func DeepCopy_api_ReplicationControllerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerStatus) + out := out.(*ReplicationControllerStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_ReplicationControllerCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ResourceFieldSelector is an autogenerated deepcopy function. +func DeepCopy_api_ResourceFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceFieldSelector) + out := out.(*ResourceFieldSelector) + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return nil + } +} + +// DeepCopy_api_ResourceQuota is an autogenerated deepcopy function. 
+func DeepCopy_api_ResourceQuota(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuota) + out := out.(*ResourceQuota) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ResourceQuotaStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_ResourceQuotaList is an autogenerated deepcopy function. +func DeepCopy_api_ResourceQuotaList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaList) + out := out.(*ResourceQuotaList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + if err := DeepCopy_api_ResourceQuota(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ResourceQuotaSpec is an autogenerated deepcopy function. +func DeepCopy_api_ResourceQuotaSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaSpec) + out := out.(*ResourceQuotaSpec) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ResourceQuotaStatus is an autogenerated deepcopy function. +func DeepCopy_api_ResourceQuotaStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaStatus) + out := out.(*ResourceQuotaStatus) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_api_ResourceRequirements is an autogenerated deepcopy function. +func DeepCopy_api_ResourceRequirements(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceRequirements) + out := out.(*ResourceRequirements) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +// DeepCopy_api_SELinuxOptions is an autogenerated deepcopy function. +func DeepCopy_api_SELinuxOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxOptions) + out := out.(*SELinuxOptions) + *out = *in + return nil + } +} + +// DeepCopy_api_ScaleIOVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleIOVolumeSource) + out := out.(*ScaleIOVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_Secret is an autogenerated deepcopy function. 
+func DeepCopy_api_Secret(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Secret) + out := out.(*Secret) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*[]byte) + } + } + } + return nil + } +} + +// DeepCopy_api_SecretEnvSource is an autogenerated deepcopy function. +func DeepCopy_api_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretEnvSource) + out := out.(*SecretEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_SecretKeySelector is an autogenerated deepcopy function. +func DeepCopy_api_SecretKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretKeySelector) + out := out.(*SecretKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_SecretList is an autogenerated deepcopy function. +func DeepCopy_api_SecretList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretList) + out := out.(*SecretList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := DeepCopy_api_Secret(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_SecretProjection is an autogenerated deepcopy function. +func DeepCopy_api_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretProjection) + out := out.(*SecretProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_SecretVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_SecretVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretVolumeSource) + out := out.(*SecretVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_SecurityContext is an autogenerated deepcopy function. 
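The generated deep-copy functions above all share one shape: assert the interface{} arguments to the concrete type, shallow-copy the struct, then re-allocate every pointer, slice, and map field so the copy shares no memory with the original; only fields such as ObjectMeta are delegated to the Cloner. A minimal caller-side sketch (hypothetical code, not part of the vendored package; it assumes a *conversion.Cloner is supplied by the surrounding API machinery):

// Sketch only: assumes imports of
//   "k8s.io/apimachinery/pkg/conversion"
//   api "k8s.io/kubernetes/pkg/api"
// and a cloner provided by the caller.
func copySecret(in *api.Secret, c *conversion.Cloner) (*api.Secret, error) {
	out := &api.Secret{}
	if err := api.DeepCopy_api_Secret(in, out, c); err != nil {
		return nil, err
	}
	// out.Data is a freshly allocated map, so mutating the copy's Data
	// never touches the original Secret.
	return out, nil
}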
+func DeepCopy_api_SecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecurityContext) + out := out.(*SecurityContext) + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + if err := DeepCopy_api_Capabilities(*in, *out, c); err != nil { + return err + } + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(types.UnixUserID) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_SerializedReference is an autogenerated deepcopy function. +func DeepCopy_api_SerializedReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SerializedReference) + out := out.(*SerializedReference) + *out = *in + return nil + } +} + +// DeepCopy_api_Service is an autogenerated deepcopy function. +func DeepCopy_api_Service(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Service) + out := out.(*Service) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ServiceStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_ServiceAccount is an autogenerated deepcopy function. +func DeepCopy_api_ServiceAccount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccount) + out := out.(*ServiceAccount) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return nil + } +} + +// DeepCopy_api_ServiceAccountList is an autogenerated deepcopy function. +func DeepCopy_api_ServiceAccountList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccountList) + out := out.(*ServiceAccountList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + if err := DeepCopy_api_ServiceAccount(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ServiceList is an autogenerated deepcopy function. 
+func DeepCopy_api_ServiceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceList) + out := out.(*ServiceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := DeepCopy_api_Service(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +// DeepCopy_api_ServicePort is an autogenerated deepcopy function. +func DeepCopy_api_ServicePort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServicePort) + out := out.(*ServicePort) + *out = *in + return nil + } +} + +// DeepCopy_api_ServiceProxyOptions is an autogenerated deepcopy function. +func DeepCopy_api_ServiceProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceProxyOptions) + out := out.(*ServiceProxyOptions) + *out = *in + return nil + } +} + +// DeepCopy_api_ServiceSpec is an autogenerated deepcopy function. +func DeepCopy_api_ServiceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceSpec) + out := out.(*ServiceSpec) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +// DeepCopy_api_ServiceStatus is an autogenerated deepcopy function. +func DeepCopy_api_ServiceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceStatus) + out := out.(*ServiceStatus) + *out = *in + if err := DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_StorageOSPersistentVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_StorageOSPersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageOSPersistentVolumeSource) + out := out.(*StorageOSPersistentVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_StorageOSVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_StorageOSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageOSVolumeSource) + out := out.(*StorageOSVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +// DeepCopy_api_Sysctl is an autogenerated deepcopy function. +func DeepCopy_api_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Sysctl) + out := out.(*Sysctl) + *out = *in + return nil + } +} + +// DeepCopy_api_TCPSocketAction is an autogenerated deepcopy function. 
+func DeepCopy_api_TCPSocketAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TCPSocketAction) + out := out.(*TCPSocketAction) + *out = *in + return nil + } +} + +// DeepCopy_api_Taint is an autogenerated deepcopy function. +func DeepCopy_api_Taint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Taint) + out := out.(*Taint) + *out = *in + out.TimeAdded = in.TimeAdded.DeepCopy() + return nil + } +} + +// DeepCopy_api_Toleration is an autogenerated deepcopy function. +func DeepCopy_api_Toleration(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Toleration) + out := out.(*Toleration) + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +// DeepCopy_api_Volume is an autogenerated deepcopy function. +func DeepCopy_api_Volume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Volume) + out := out.(*Volume) + *out = *in + if err := DeepCopy_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { + return err + } + return nil + } +} + +// DeepCopy_api_VolumeMount is an autogenerated deepcopy function. +func DeepCopy_api_VolumeMount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeMount) + out := out.(*VolumeMount) + *out = *in + return nil + } +} + +// DeepCopy_api_VolumeProjection is an autogenerated deepcopy function. +func DeepCopy_api_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeProjection) + out := out.(*VolumeProjection) + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + if err := DeepCopy_api_SecretProjection(*in, *out, c); err != nil { + return err + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + if err := DeepCopy_api_DownwardAPIProjection(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + if err := DeepCopy_api_ConfigMapProjection(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_VolumeSource is an autogenerated deepcopy function. 
+func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeSource) + out := out.(*VolumeSource) + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + if err := DeepCopy_api_EmptyDirVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + if err := DeepCopy_api_SecretVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + if err := DeepCopy_api_DownwardAPIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + if err := DeepCopy_api_ConfigMapVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = 
new(AzureDiskVolumeSource) + if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + if err := DeepCopy_api_ProjectedVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(StorageOSVolumeSource) + if err := DeepCopy_api_StorageOSVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +// DeepCopy_api_VsphereVirtualDiskVolumeSource is an autogenerated deepcopy function. +func DeepCopy_api_VsphereVirtualDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VsphereVirtualDiskVolumeSource) + out := out.(*VsphereVirtualDiskVolumeSource) + *out = *in + return nil + } +} + +// DeepCopy_api_WeightedPodAffinityTerm is an autogenerated deepcopy function. +func DeepCopy_api_WeightedPodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WeightedPodAffinityTerm) + out := out.(*WeightedPodAffinityTerm) + *out = *in + if err := DeepCopy_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/constants.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/constants.go new file mode 100644 index 000000000..e7ccd58ae --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/constants.go @@ -0,0 +1,23 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package portforward contains server-side logic for handling port forwarding requests. +package portforward + +// The subprotocol "portforward.k8s.io" is used for port forwarding. +const ProtocolV1Name = "portforward.k8s.io" + +var SupportedProtocols = []string{ProtocolV1Name} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go new file mode 100644 index 000000000..5f872c820 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go @@ -0,0 +1,309 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package portforward + +import ( + "errors" + "fmt" + "net/http" + "strconv" + "sync" + "time" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/kubernetes/pkg/api" + + "github.com/golang/glog" +) + +func handleHttpStreams(req *http.Request, w http.ResponseWriter, portForwarder PortForwarder, podName string, uid types.UID, supportedPortForwardProtocols []string, idleTimeout, streamCreationTimeout time.Duration) error { + _, err := httpstream.Handshake(req, w, supportedPortForwardProtocols) + // negotiated protocol isn't currently used server side, but could be in the future + if err != nil { + // Handshake writes the error to the client + return err + } + streamChan := make(chan httpstream.Stream, 1) + + glog.V(5).Infof("Upgrading port forward response") + upgrader := spdy.NewResponseUpgrader() + conn := upgrader.UpgradeResponse(w, req, httpStreamReceived(streamChan)) + if conn == nil { + return errors.New("Unable to upgrade websocket connection") + } + defer conn.Close() + + glog.V(5).Infof("(conn=%p) setting port forwarding streaming connection idle timeout to %v", conn, idleTimeout) + conn.SetIdleTimeout(idleTimeout) + + h := &httpStreamHandler{ + conn: conn, + streamChan: streamChan, + streamPairs: make(map[string]*httpStreamPair), + streamCreationTimeout: streamCreationTimeout, + pod: podName, + uid: uid, + forwarder: portForwarder, + } + h.run() + + return nil +} + +// httpStreamReceived is the httpstream.NewStreamHandler for port +// forward streams. It checks each stream's port and stream type headers, +// rejecting any streams that with missing or invalid values. Each valid +// stream is sent to the streams channel. +func httpStreamReceived(streams chan httpstream.Stream) func(httpstream.Stream, <-chan struct{}) error { + return func(stream httpstream.Stream, replySent <-chan struct{}) error { + // make sure it has a valid port header + portString := stream.Headers().Get(api.PortHeader) + if len(portString) == 0 { + return fmt.Errorf("%q header is required", api.PortHeader) + } + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return fmt.Errorf("unable to parse %q as a port: %v", portString, err) + } + if port < 1 { + return fmt.Errorf("port %q must be > 0", portString) + } + + // make sure it has a valid stream type header + streamType := stream.Headers().Get(api.StreamType) + if len(streamType) == 0 { + return fmt.Errorf("%q header is required", api.StreamType) + } + if streamType != api.StreamTypeError && streamType != api.StreamTypeData { + return fmt.Errorf("invalid stream type %q", streamType) + } + + streams <- stream + return nil + } +} + +// httpStreamHandler is capable of processing multiple port forward +// requests over a single httpstream.Connection. 
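httpStreamReceived only admits streams that carry a valid port header and a data or error stream type, and the requestID logic below pairs them up per forwarded connection. For orientation, this is roughly what a client has to send for each port; a hedged sketch that only assumes an already-upgraded httpstream.Connection and the api header constants used above:

// Sketch only: assumes imports of "net/http", "strconv",
// "k8s.io/apimachinery/pkg/util/httpstream", and "k8s.io/kubernetes/pkg/api".
func openPortForwardStreams(conn httpstream.Connection, port uint16, requestID int) (data, errorStream httpstream.Stream, err error) {
	headers := http.Header{}
	headers.Set(api.PortHeader, strconv.Itoa(int(port)))
	headers.Set(api.PortForwardRequestIDHeader, strconv.Itoa(requestID))

	// The error stream is write-only from the server's point of view.
	headers.Set(api.StreamType, api.StreamTypeError)
	if errorStream, err = conn.CreateStream(headers); err != nil {
		return nil, nil, err
	}

	// The data stream carries the forwarded traffic in both directions.
	headers.Set(api.StreamType, api.StreamTypeData)
	if data, err = conn.CreateStream(headers); err != nil {
		return nil, nil, err
	}
	return data, errorStream, nil
}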
+type httpStreamHandler struct { + conn httpstream.Connection + streamChan chan httpstream.Stream + streamPairsLock sync.RWMutex + streamPairs map[string]*httpStreamPair + streamCreationTimeout time.Duration + pod string + uid types.UID + forwarder PortForwarder +} + +// getStreamPair returns a httpStreamPair for requestID. This creates a +// new pair if one does not yet exist for the requestID. The returned bool is +// true if the pair was created. +func (h *httpStreamHandler) getStreamPair(requestID string) (*httpStreamPair, bool) { + h.streamPairsLock.Lock() + defer h.streamPairsLock.Unlock() + + if p, ok := h.streamPairs[requestID]; ok { + glog.V(5).Infof("(conn=%p, request=%s) found existing stream pair", h.conn, requestID) + return p, false + } + + glog.V(5).Infof("(conn=%p, request=%s) creating new stream pair", h.conn, requestID) + + p := newPortForwardPair(requestID) + h.streamPairs[requestID] = p + + return p, true +} + +// monitorStreamPair waits for the pair to receive both its error and data +// streams, or for the timeout to expire (whichever happens first), and then +// removes the pair. +func (h *httpStreamHandler) monitorStreamPair(p *httpStreamPair, timeout <-chan time.Time) { + select { + case <-timeout: + err := fmt.Errorf("(conn=%v, request=%s) timed out waiting for streams", h.conn, p.requestID) + utilruntime.HandleError(err) + p.printError(err.Error()) + case <-p.complete: + glog.V(5).Infof("(conn=%v, request=%s) successfully received error and data streams", h.conn, p.requestID) + } + h.removeStreamPair(p.requestID) +} + +// hasStreamPair returns a bool indicating if a stream pair for requestID +// exists. +func (h *httpStreamHandler) hasStreamPair(requestID string) bool { + h.streamPairsLock.RLock() + defer h.streamPairsLock.RUnlock() + + _, ok := h.streamPairs[requestID] + return ok +} + +// removeStreamPair removes the stream pair identified by requestID from streamPairs. +func (h *httpStreamHandler) removeStreamPair(requestID string) { + h.streamPairsLock.Lock() + defer h.streamPairsLock.Unlock() + + delete(h.streamPairs, requestID) +} + +// requestID returns the request id for stream. +func (h *httpStreamHandler) requestID(stream httpstream.Stream) string { + requestID := stream.Headers().Get(api.PortForwardRequestIDHeader) + if len(requestID) == 0 { + glog.V(5).Infof("(conn=%p) stream received without %s header", h.conn, api.PortForwardRequestIDHeader) + // If we get here, it's because the connection came from an older client + // that isn't generating the request id header + // (https://github.com/kubernetes/kubernetes/blob/843134885e7e0b360eb5441e85b1410a8b1a7a0c/pkg/client/unversioned/portforward/portforward.go#L258-L287) + // + // This is a best-effort attempt at supporting older clients. + // + // When there aren't concurrent new forwarded connections, each connection + // will have a pair of streams (data, error), and the stream IDs will be + // consecutive odd numbers, e.g. 1 and 3 for the first connection. Convert + // the stream ID into a pseudo-request id by taking the stream type and + // using id = stream.Identifier() when the stream type is error, + // and id = stream.Identifier() - 2 when it's data. + // + // NOTE: this only works when there are not concurrent new streams from + // multiple forwarded connections; it's a best-effort attempt at supporting + // old clients that don't generate request ids. If there are concurrent + // new connections, it's possible that 1 connection gets streams whose IDs + // are not consecutive (e.g. 
5 and 9 instead of 5 and 7). + streamType := stream.Headers().Get(api.StreamType) + switch streamType { + case api.StreamTypeError: + requestID = strconv.Itoa(int(stream.Identifier())) + case api.StreamTypeData: + requestID = strconv.Itoa(int(stream.Identifier()) - 2) + } + + glog.V(5).Infof("(conn=%p) automatically assigning request ID=%q from stream type=%s, stream ID=%d", h.conn, requestID, streamType, stream.Identifier()) + } + return requestID +} + +// run is the main loop for the httpStreamHandler. It processes new +// streams, invoking portForward for each complete stream pair. The loop exits +// when the httpstream.Connection is closed. +func (h *httpStreamHandler) run() { + glog.V(5).Infof("(conn=%p) waiting for port forward streams", h.conn) +Loop: + for { + select { + case <-h.conn.CloseChan(): + glog.V(5).Infof("(conn=%p) upgraded connection closed", h.conn) + break Loop + case stream := <-h.streamChan: + requestID := h.requestID(stream) + streamType := stream.Headers().Get(api.StreamType) + glog.V(5).Infof("(conn=%p, request=%s) received new stream of type %s", h.conn, requestID, streamType) + + p, created := h.getStreamPair(requestID) + if created { + go h.monitorStreamPair(p, time.After(h.streamCreationTimeout)) + } + if complete, err := p.add(stream); err != nil { + msg := fmt.Sprintf("error processing stream for request %s: %v", requestID, err) + utilruntime.HandleError(errors.New(msg)) + p.printError(msg) + } else if complete { + go h.portForward(p) + } + } + } +} + +// portForward invokes the httpStreamHandler's forwarder.PortForward +// function for the given stream pair. +func (h *httpStreamHandler) portForward(p *httpStreamPair) { + defer p.dataStream.Close() + defer p.errorStream.Close() + + portString := p.dataStream.Headers().Get(api.PortHeader) + port, _ := strconv.ParseInt(portString, 10, 32) + + glog.V(5).Infof("(conn=%p, request=%s) invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) + err := h.forwarder.PortForward(h.pod, h.uid, int32(port), p.dataStream) + glog.V(5).Infof("(conn=%p, request=%s) done invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) + + if err != nil { + msg := fmt.Errorf("error forwarding port %d to pod %s, uid %v: %v", port, h.pod, h.uid, err) + utilruntime.HandleError(msg) + fmt.Fprint(p.errorStream, msg.Error()) + } +} + +// httpStreamPair represents the error and data streams for a port +// forwarding request. +type httpStreamPair struct { + lock sync.RWMutex + requestID string + dataStream httpstream.Stream + errorStream httpstream.Stream + complete chan struct{} +} + +// newPortForwardPair creates a new httpStreamPair. +func newPortForwardPair(requestID string) *httpStreamPair { + return &httpStreamPair{ + requestID: requestID, + complete: make(chan struct{}), + } +} + +// add adds the stream to the httpStreamPair. If the pair already +// contains a stream for the new stream's type, an error is returned. add +// returns true if both the data and error streams for this pair have been +// received. 
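To make the fallback concrete: the mapping assumes the error stream was opened first (lower stream ID), so the first forwarded connection's streams 1 (error) and 3 (data) both resolve to request ID "1", the next pair 5 and 7 resolve to "5", and so on. A small helper mirroring that mapping (illustrative only):

// Sketch only: mirrors the switch above for streams that carry no requestID
// header; assumes "strconv" and the api stream-type constants.
func legacyRequestID(streamType string, streamID uint32) string {
	switch streamType {
	case api.StreamTypeError:
		return strconv.Itoa(int(streamID))
	case api.StreamTypeData:
		return strconv.Itoa(int(streamID) - 2)
	}
	return ""
}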
+func (p *httpStreamPair) add(stream httpstream.Stream) (bool, error) { + p.lock.Lock() + defer p.lock.Unlock() + + switch stream.Headers().Get(api.StreamType) { + case api.StreamTypeError: + if p.errorStream != nil { + return false, errors.New("error stream already assigned") + } + p.errorStream = stream + case api.StreamTypeData: + if p.dataStream != nil { + return false, errors.New("data stream already assigned") + } + p.dataStream = stream + } + + complete := p.errorStream != nil && p.dataStream != nil + if complete { + close(p.complete) + } + return complete, nil +} + +// printError writes s to p.errorStream if p.errorStream has been set. +func (p *httpStreamPair) printError(s string) { + p.lock.RLock() + defer p.lock.RUnlock() + if p.errorStream != nil { + fmt.Fprint(p.errorStream, s) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/portforward.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/portforward.go new file mode 100644 index 000000000..60a96e51a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/portforward.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package portforward + +import ( + "io" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/util/wsstream" +) + +// PortForwarder knows how to forward content from a data stream to/from a port +// in a pod. +type PortForwarder interface { + // PortForwarder copies data between a data stream and a port in a pod. + PortForward(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error +} + +// ServePortForward handles a port forwarding request. A single request is +// kept alive as long as the client is still alive and the connection has not +// been timed out due to idleness. This function handles multiple forwarded +// connections; i.e., multiple `curl http://localhost:8888/` requests will be +// handled by a single invocation of ServePortForward. +func ServePortForward(w http.ResponseWriter, req *http.Request, portForwarder PortForwarder, podName string, uid types.UID, portForwardOptions *V4Options, idleTimeout time.Duration, streamCreationTimeout time.Duration, supportedProtocols []string) { + var err error + if wsstream.IsWebSocketRequest(req) { + err = handleWebSocketStreams(req, w, portForwarder, podName, uid, portForwardOptions, supportedProtocols, idleTimeout, streamCreationTimeout) + } else { + err = handleHttpStreams(req, w, portForwarder, podName, uid, supportedProtocols, idleTimeout, streamCreationTimeout) + } + + if err != nil { + runtime.HandleError(err) + return + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go new file mode 100644 index 000000000..22d5add06 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go @@ -0,0 +1,198 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package portforward + +import ( + "encoding/binary" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/server/httplog" + "k8s.io/apiserver/pkg/util/wsstream" + "k8s.io/kubernetes/pkg/api" +) + +const ( + dataChannel = iota + errorChannel + + v4BinaryWebsocketProtocol = "v4." + wsstream.ChannelWebSocketProtocol + v4Base64WebsocketProtocol = "v4." + wsstream.Base64ChannelWebSocketProtocol +) + +// options contains details about which streams are required for +// port forwarding. +// All fields incldued in V4Options need to be expressed explicilty in the +// CRI (pkg/kubelet/apis/cri/{version}/api.proto) PortForwardRequest. +type V4Options struct { + Ports []int32 +} + +// newOptions creates a new options from the Request. +func NewV4Options(req *http.Request) (*V4Options, error) { + if !wsstream.IsWebSocketRequest(req) { + return &V4Options{}, nil + } + + portStrings := req.URL.Query()[api.PortHeader] + if len(portStrings) == 0 { + return nil, fmt.Errorf("query parameter %q is required", api.PortHeader) + } + + ports := make([]int32, 0, len(portStrings)) + for _, portString := range portStrings { + if len(portString) == 0 { + return nil, fmt.Errorf("query parameter %q cannot be empty", api.PortHeader) + } + for _, p := range strings.Split(portString, ",") { + port, err := strconv.ParseUint(p, 10, 16) + if err != nil { + return nil, fmt.Errorf("unable to parse %q as a port: %v", portString, err) + } + if port < 1 { + return nil, fmt.Errorf("port %q must be > 0", portString) + } + ports = append(ports, int32(port)) + } + } + + return &V4Options{ + Ports: ports, + }, nil +} + +// BuildV4Options returns a V4Options based on the given information. +func BuildV4Options(ports []int32) (*V4Options, error) { + return &V4Options{Ports: ports}, nil +} + +// handleWebSocketStreams handles requests to forward ports to a pod via +// a PortForwarder. A pair of streams are created per port (DATA n, +// ERROR n+1). The associated port is written to each stream as a unsigned 16 +// bit integer in little endian format. 
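Each requested port therefore gets its own (data, error) channel pair, and the handler below prefixes both channels with the port number as two little-endian bytes before any payload flows. A client reading those channels would peel that prefix off first; a minimal sketch assuming only the standard library:

// readPortFrame consumes the 2-byte little-endian port prefix written by
// handleWebSocketStreams onto every data and error channel and returns the
// announced port. Sketch only: assumes imports of "encoding/binary" and "io".
func readPortFrame(r io.Reader) (uint16, error) {
	var buf [2]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint16(buf[:]), nil
}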
+func handleWebSocketStreams(req *http.Request, w http.ResponseWriter, portForwarder PortForwarder, podName string, uid types.UID, opts *V4Options, supportedPortForwardProtocols []string, idleTimeout, streamCreationTimeout time.Duration) error { + channels := make([]wsstream.ChannelType, 0, len(opts.Ports)*2) + for i := 0; i < len(opts.Ports); i++ { + channels = append(channels, wsstream.ReadWriteChannel, wsstream.WriteChannel) + } + conn := wsstream.NewConn(map[string]wsstream.ChannelProtocolConfig{ + "": { + Binary: true, + Channels: channels, + }, + v4BinaryWebsocketProtocol: { + Binary: true, + Channels: channels, + }, + v4Base64WebsocketProtocol: { + Binary: false, + Channels: channels, + }, + }) + conn.SetIdleTimeout(idleTimeout) + _, streams, err := conn.Open(httplog.Unlogged(w), req) + if err != nil { + err = fmt.Errorf("Unable to upgrade websocket connection: %v", err) + return err + } + defer conn.Close() + streamPairs := make([]*websocketStreamPair, len(opts.Ports)) + for i := range streamPairs { + streamPair := websocketStreamPair{ + port: opts.Ports[i], + dataStream: streams[i*2+dataChannel], + errorStream: streams[i*2+errorChannel], + } + streamPairs[i] = &streamPair + + portBytes := make([]byte, 2) + // port is always positive so conversion is allowable + binary.LittleEndian.PutUint16(portBytes, uint16(streamPair.port)) + streamPair.dataStream.Write(portBytes) + streamPair.errorStream.Write(portBytes) + } + h := &websocketStreamHandler{ + conn: conn, + streamPairs: streamPairs, + pod: podName, + uid: uid, + forwarder: portForwarder, + } + h.run() + + return nil +} + +// websocketStreamPair represents the error and data streams for a port +// forwarding request. +type websocketStreamPair struct { + port int32 + dataStream io.ReadWriteCloser + errorStream io.WriteCloser +} + +// websocketStreamHandler is capable of processing a single port forward +// request over a websocket connection +type websocketStreamHandler struct { + conn *wsstream.Conn + ports []int32 + streamPairs []*websocketStreamPair + pod string + uid types.UID + forwarder PortForwarder +} + +// run invokes the websocketStreamHandler's forwarder.PortForward +// function for the given stream pair. +func (h *websocketStreamHandler) run() { + wg := sync.WaitGroup{} + wg.Add(len(h.streamPairs)) + + for _, pair := range h.streamPairs { + p := pair + go func() { + defer wg.Done() + h.portForward(p) + }() + } + + wg.Wait() +} + +func (h *websocketStreamHandler) portForward(p *websocketStreamPair) { + defer p.dataStream.Close() + defer p.errorStream.Close() + + glog.V(5).Infof("(conn=%p) invoking forwarder.PortForward for port %d", h.conn, p.port) + err := h.forwarder.PortForward(h.pod, h.uid, p.port, p.dataStream) + glog.V(5).Infof("(conn=%p) done invoking forwarder.PortForward for port %d", h.conn, p.port) + + if err != nil { + msg := fmt.Errorf("error forwarding port %d to pod %s, uid %v: %v", p.port, h.pod, h.uid, err) + runtime.HandleError(msg) + fmt.Fprint(p.errorStream, msg.Error()) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/attach.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/attach.go new file mode 100644 index 000000000..e266f34fe --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/attach.go @@ -0,0 +1,59 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "net/http" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" +) + +// Attacher knows how to attach to a running container in a pod. +type Attacher interface { + // AttachContainer attaches to the running container in the pod, copying data between in/out/err + // and the container's stdin/stdout/stderr. + AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error +} + +// ServeAttach handles requests to attach to a container. After creating/receiving the required +// streams, it delegates the actual attaching to attacher. +func ServeAttach(w http.ResponseWriter, req *http.Request, attacher Attacher, podName string, uid types.UID, container string, streamOpts *Options, idleTimeout, streamCreationTimeout time.Duration, supportedProtocols []string) { + ctx, ok := createStreams(req, w, streamOpts, supportedProtocols, idleTimeout, streamCreationTimeout) + if !ok { + // error is handled by createStreams + return + } + defer ctx.conn.Close() + + err := attacher.AttachContainer(podName, uid, container, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan) + if err != nil { + err = fmt.Errorf("error attaching to container: %v", err) + runtime.HandleError(err) + ctx.writeStatus(apierrors.NewInternalError(err)) + } else { + ctx.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{ + Status: metav1.StatusSuccess, + }}) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/doc.go new file mode 100644 index 000000000..24f9393ab --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package remotecommand contains functions related to executing commands in and attaching to pods. +package remotecommand // import "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/exec.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/exec.go new file mode 100644 index 000000000..8d14a937a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/exec.go @@ -0,0 +1,79 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "net/http" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" + utilexec "k8s.io/kubernetes/pkg/util/exec" +) + +// Executor knows how to execute a command in a container in a pod. +type Executor interface { + // ExecInContainer executes a command in a container in the pod, copying data + // between in/out/err and the container's stdin/stdout/stderr. + ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error +} + +// ServeExec handles requests to execute a command in a container. After +// creating/receiving the required streams, it delegates the actual execution +// to the executor. +func ServeExec(w http.ResponseWriter, req *http.Request, executor Executor, podName string, uid types.UID, container string, cmd []string, streamOpts *Options, idleTimeout, streamCreationTimeout time.Duration, supportedProtocols []string) { + ctx, ok := createStreams(req, w, streamOpts, supportedProtocols, idleTimeout, streamCreationTimeout) + if !ok { + // error is handled by createStreams + return + } + defer ctx.conn.Close() + + err := executor.ExecInContainer(podName, uid, container, cmd, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan, 0) + if err != nil { + if exitErr, ok := err.(utilexec.ExitError); ok && exitErr.Exited() { + rc := exitErr.ExitStatus() + ctx.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Reason: remotecommandconsts.NonZeroExitCodeReason, + Details: &metav1.StatusDetails{ + Causes: []metav1.StatusCause{ + { + Type: remotecommandconsts.ExitCodeCauseType, + Message: fmt.Sprintf("%d", rc), + }, + }, + }, + Message: fmt.Sprintf("command terminated with non-zero exit code: %v", exitErr), + }}) + } else { + err = fmt.Errorf("error executing command in container: %v", err) + runtime.HandleError(err) + ctx.writeStatus(apierrors.NewInternalError(err)) + } + } else { + ctx.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{ + Status: metav1.StatusSuccess, + }}) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go new file mode 100644 index 000000000..f09b5e400 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go @@ -0,0 +1,447 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/util/wsstream" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/kubernetes/pkg/api" + + "github.com/golang/glog" +) + +// Options contains details about which streams are required for +// remote command execution. +type Options struct { + Stdin bool + Stdout bool + Stderr bool + TTY bool +} + +// NewOptions creates a new Options from the Request. +func NewOptions(req *http.Request) (*Options, error) { + tty := req.FormValue(api.ExecTTYParam) == "1" + stdin := req.FormValue(api.ExecStdinParam) == "1" + stdout := req.FormValue(api.ExecStdoutParam) == "1" + stderr := req.FormValue(api.ExecStderrParam) == "1" + if tty && stderr { + // TODO: make this an error before we reach this method + glog.V(4).Infof("Access to exec with tty and stderr is not supported, bypassing stderr") + stderr = false + } + + if !stdin && !stdout && !stderr { + return nil, fmt.Errorf("you must specify at least 1 of stdin, stdout, stderr") + } + + return &Options{ + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + TTY: tty, + }, nil +} + +// context contains the connection and streams used when +// forwarding an attach or execute session into a container. +type context struct { + conn io.Closer + stdinStream io.ReadCloser + stdoutStream io.WriteCloser + stderrStream io.WriteCloser + writeStatus func(status *apierrors.StatusError) error + resizeStream io.ReadCloser + resizeChan chan remotecommand.TerminalSize + tty bool +} + +// streamAndReply holds both a Stream and a channel that is closed when the stream's reply frame is +// enqueued. Consumers can wait for replySent to be closed prior to proceeding, to ensure that the +// replyFrame is enqueued before the connection's goaway frame is sent (e.g. if a stream was +// received and right after, the connection gets closed). +type streamAndReply struct { + httpstream.Stream + replySent <-chan struct{} +} + +// waitStreamReply waits until either replySent or stop is closed. If replySent is closed, it sends +// an empty struct to the notify channel. 
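Before the protocol handlers start waiting, the server works out how many streams the client is expected to open: the error stream is always there, each of stdin/stdout/stderr requested in Options adds one, and a TTY adds a resize stream only when the negotiated protocol supports resizing (see createHttpStreamStreams further down). A small helper that mirrors that bookkeeping (illustrative only; Options is the type defined above):

// expectedStreamCount mirrors the stream counting in createHttpStreamStreams.
func expectedStreamCount(opts *Options, supportsResize bool) int {
	n := 1 // the error stream is always expected
	if opts.Stdin {
		n++
	}
	if opts.Stdout {
		n++
	}
	if opts.Stderr {
		n++
	}
	if opts.TTY && supportsResize {
		n++
	}
	return n
}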
+func waitStreamReply(replySent <-chan struct{}, notify chan<- struct{}, stop <-chan struct{}) { + select { + case <-replySent: + notify <- struct{}{} + case <-stop: + } +} + +func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*context, bool) { + var ctx *context + var ok bool + if wsstream.IsWebSocketRequest(req) { + ctx, ok = createWebSocketStreams(req, w, opts, idleTimeout) + } else { + ctx, ok = createHttpStreamStreams(req, w, opts, supportedStreamProtocols, idleTimeout, streamCreationTimeout) + } + if !ok { + return nil, false + } + + if ctx.resizeStream != nil { + ctx.resizeChan = make(chan remotecommand.TerminalSize) + go handleResizeEvents(ctx.resizeStream, ctx.resizeChan) + } + + return ctx, true +} + +func createHttpStreamStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*context, bool) { + protocol, err := httpstream.Handshake(req, w, supportedStreamProtocols) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprint(w, err.Error()) + return nil, false + } + + streamCh := make(chan streamAndReply) + + upgrader := spdy.NewResponseUpgrader() + conn := upgrader.UpgradeResponse(w, req, func(stream httpstream.Stream, replySent <-chan struct{}) error { + streamCh <- streamAndReply{Stream: stream, replySent: replySent} + return nil + }) + // from this point on, we can no longer call methods on response + if conn == nil { + // The upgrader is responsible for notifying the client of any errors that + // occurred during upgrading. All we can do is return here at this point + // if we weren't successful in upgrading. + return nil, false + } + + conn.SetIdleTimeout(idleTimeout) + + var handler protocolHandler + switch protocol { + case remotecommandconsts.StreamProtocolV4Name: + handler = &v4ProtocolHandler{} + case remotecommandconsts.StreamProtocolV3Name: + handler = &v3ProtocolHandler{} + case remotecommandconsts.StreamProtocolV2Name: + handler = &v2ProtocolHandler{} + case "": + glog.V(4).Infof("Client did not request protocol negotiaion. Falling back to %q", remotecommandconsts.StreamProtocolV1Name) + fallthrough + case remotecommandconsts.StreamProtocolV1Name: + handler = &v1ProtocolHandler{} + } + + // count the streams client asked for, starting with 1 + expectedStreams := 1 + if opts.Stdin { + expectedStreams++ + } + if opts.Stdout { + expectedStreams++ + } + if opts.Stderr { + expectedStreams++ + } + if opts.TTY && handler.supportsTerminalResizing() { + expectedStreams++ + } + + expired := time.NewTimer(streamCreationTimeout) + defer expired.Stop() + + ctx, err := handler.waitForStreams(streamCh, expectedStreams, expired.C) + if err != nil { + runtime.HandleError(err) + return nil, false + } + + ctx.conn = conn + ctx.tty = opts.TTY + + return ctx, true +} + +type protocolHandler interface { + // waitForStreams waits for the expected streams or a timeout, returning a + // remoteCommandContext if all the streams were received, or an error if not. + waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) + // supportsTerminalResizing returns true if the protocol handler supports terminal resizing + supportsTerminalResizing() bool +} + +// v4ProtocolHandler implements the V4 protocol version for streaming command execution. 
It only differs +// in from v3 in the error stream format using an json-marshaled metav1.Status which carries +// the process' exit code. +type v4ProtocolHandler struct{} + +func (*v4ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { + ctx := &context{} + receivedStreams := 0 + replyChan := make(chan struct{}) + stop := make(chan struct{}) + defer close(stop) +WaitForStreams: + for { + select { + case stream := <-streams: + streamType := stream.Headers().Get(api.StreamType) + switch streamType { + case api.StreamTypeError: + ctx.writeStatus = v4WriteStatusFunc(stream) // write json errors + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdin: + ctx.stdinStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdout: + ctx.stdoutStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStderr: + ctx.stderrStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeResize: + ctx.resizeStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + default: + runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType)) + } + case <-replyChan: + receivedStreams++ + if receivedStreams == expectedStreams { + break WaitForStreams + } + case <-expired: + // TODO find a way to return the error to the user. Maybe use a separate + // stream to report errors? + return nil, errors.New("timed out waiting for client to create streams") + } + } + + return ctx, nil +} + +// supportsTerminalResizing returns true because v4ProtocolHandler supports it +func (*v4ProtocolHandler) supportsTerminalResizing() bool { return true } + +// v3ProtocolHandler implements the V3 protocol version for streaming command execution. +type v3ProtocolHandler struct{} + +func (*v3ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { + ctx := &context{} + receivedStreams := 0 + replyChan := make(chan struct{}) + stop := make(chan struct{}) + defer close(stop) +WaitForStreams: + for { + select { + case stream := <-streams: + streamType := stream.Headers().Get(api.StreamType) + switch streamType { + case api.StreamTypeError: + ctx.writeStatus = v1WriteStatusFunc(stream) + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdin: + ctx.stdinStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdout: + ctx.stdoutStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStderr: + ctx.stderrStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeResize: + ctx.resizeStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + default: + runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType)) + } + case <-replyChan: + receivedStreams++ + if receivedStreams == expectedStreams { + break WaitForStreams + } + case <-expired: + // TODO find a way to return the error to the user. Maybe use a separate + // stream to report errors? 
+ return nil, errors.New("timed out waiting for client to create streams") + } + } + + return ctx, nil +} + +// supportsTerminalResizing returns true because v3ProtocolHandler supports it +func (*v3ProtocolHandler) supportsTerminalResizing() bool { return true } + +// v2ProtocolHandler implements the V2 protocol version for streaming command execution. +type v2ProtocolHandler struct{} + +func (*v2ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { + ctx := &context{} + receivedStreams := 0 + replyChan := make(chan struct{}) + stop := make(chan struct{}) + defer close(stop) +WaitForStreams: + for { + select { + case stream := <-streams: + streamType := stream.Headers().Get(api.StreamType) + switch streamType { + case api.StreamTypeError: + ctx.writeStatus = v1WriteStatusFunc(stream) + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdin: + ctx.stdinStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdout: + ctx.stdoutStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStderr: + ctx.stderrStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + default: + runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType)) + } + case <-replyChan: + receivedStreams++ + if receivedStreams == expectedStreams { + break WaitForStreams + } + case <-expired: + // TODO find a way to return the error to the user. Maybe use a separate + // stream to report errors? + return nil, errors.New("timed out waiting for client to create streams") + } + } + + return ctx, nil +} + +// supportsTerminalResizing returns false because v2ProtocolHandler doesn't support it. +func (*v2ProtocolHandler) supportsTerminalResizing() bool { return false } + +// v1ProtocolHandler implements the V1 protocol version for streaming command execution. +type v1ProtocolHandler struct{} + +func (*v1ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { + ctx := &context{} + receivedStreams := 0 + replyChan := make(chan struct{}) + stop := make(chan struct{}) + defer close(stop) +WaitForStreams: + for { + select { + case stream := <-streams: + streamType := stream.Headers().Get(api.StreamType) + switch streamType { + case api.StreamTypeError: + ctx.writeStatus = v1WriteStatusFunc(stream) + + // This defer statement shouldn't be here, but due to previous refactoring, it ended up in + // here. This is what 1.0.x kubelets do, so we're retaining that behavior. This is fixed in + // the v2ProtocolHandler. + defer stream.Reset() + + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdin: + ctx.stdinStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdout: + ctx.stdoutStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStderr: + ctx.stderrStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + default: + runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType)) + } + case <-replyChan: + receivedStreams++ + if receivedStreams == expectedStreams { + break WaitForStreams + } + case <-expired: + // TODO find a way to return the error to the user. Maybe use a separate + // stream to report errors? 
+ return nil, errors.New("timed out waiting for client to create streams") + } + } + + if ctx.stdinStream != nil { + ctx.stdinStream.Close() + } + + return ctx, nil +} + +// supportsTerminalResizing returns false because v1ProtocolHandler doesn't support it. +func (*v1ProtocolHandler) supportsTerminalResizing() bool { return false } + +func handleResizeEvents(stream io.Reader, channel chan<- remotecommand.TerminalSize) { + defer runtime.HandleCrash() + + decoder := json.NewDecoder(stream) + for { + size := remotecommand.TerminalSize{} + if err := decoder.Decode(&size); err != nil { + break + } + channel <- size + } +} + +func v1WriteStatusFunc(stream io.WriteCloser) func(status *apierrors.StatusError) error { + return func(status *apierrors.StatusError) error { + if status.Status().Status == metav1.StatusSuccess { + return nil // send error messages + } + _, err := stream.Write([]byte(status.Error())) + return err + } +} + +// v4WriteStatusFunc returns a WriteStatusFunc that marshals a given api Status +// as json in the error channel. +func v4WriteStatusFunc(stream io.WriteCloser) func(status *apierrors.StatusError) error { + return func(status *apierrors.StatusError) error { + bs, err := json.Marshal(status.Status()) + if err != nil { + return err + } + _, err = stream.Write(bs) + return err + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/websocket.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/websocket.go new file mode 100644 index 000000000..c60012b21 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/websocket.go @@ -0,0 +1,132 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/server/httplog" + "k8s.io/apiserver/pkg/util/wsstream" +) + +const ( + stdinChannel = iota + stdoutChannel + stderrChannel + errorChannel + resizeChannel + + preV4BinaryWebsocketProtocol = wsstream.ChannelWebSocketProtocol + preV4Base64WebsocketProtocol = wsstream.Base64ChannelWebSocketProtocol + v4BinaryWebsocketProtocol = "v4." + wsstream.ChannelWebSocketProtocol + v4Base64WebsocketProtocol = "v4." + wsstream.Base64ChannelWebSocketProtocol +) + +// createChannels returns the standard channel types for a shell connection (STDIN 0, STDOUT 1, STDERR 2) +// along with the approximate duplex value. It also creates the error (3) and resize (4) channels. +func createChannels(opts *Options) []wsstream.ChannelType { + // open the requested channels, and always open the error channel + channels := make([]wsstream.ChannelType, 5) + channels[stdinChannel] = readChannel(opts.Stdin) + channels[stdoutChannel] = writeChannel(opts.Stdout) + channels[stderrChannel] = writeChannel(opts.Stderr) + channels[errorChannel] = wsstream.WriteChannel + channels[resizeChannel] = wsstream.ReadChannel + return channels +} + +// readChannel returns wsstream.ReadChannel if real is true, or wsstream.IgnoreChannel. 
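The resize channel (index 4 above) carries a stream of JSON-encoded terminal sizes; handleResizeEvents decodes them one object at a time and forwards each onto a Go channel. Below is a minimal, dependency-free sketch of that wire format — the local terminalSize struct is a stand-in assumed to mirror client-go's remotecommand.TerminalSize (Width/Height in character cells), and the decode loop mirrors the one in handleResizeEvents:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
)

// terminalSize is a local stand-in assumed to mirror remotecommand.TerminalSize.
type terminalSize struct {
	Width  uint16
	Height uint16
}

func main() {
	r, w := io.Pipe()

	// Client side: each window resize is written as one JSON object on the resize stream.
	go func() {
		enc := json.NewEncoder(w)
		enc.Encode(terminalSize{Width: 80, Height: 24})
		enc.Encode(terminalSize{Width: 120, Height: 40})
		w.Close()
	}()

	// Server side: decode until the stream ends, feeding sizes to a channel,
	// as handleResizeEvents does.
	sizes := make(chan terminalSize)
	go func() {
		defer close(sizes)
		dec := json.NewDecoder(r)
		for {
			var size terminalSize
			if err := dec.Decode(&size); err != nil {
				break
			}
			sizes <- size
		}
	}()

	for size := range sizes {
		fmt.Printf("resize to %dx%d\n", size.Width, size.Height)
	}
}
```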
+func readChannel(real bool) wsstream.ChannelType { + if real { + return wsstream.ReadChannel + } + return wsstream.IgnoreChannel +} + +// writeChannel returns wsstream.WriteChannel if real is true, or wsstream.IgnoreChannel. +func writeChannel(real bool) wsstream.ChannelType { + if real { + return wsstream.WriteChannel + } + return wsstream.IgnoreChannel +} + +// createWebSocketStreams returns a context containing the websocket connection and +// streams needed to perform an exec or an attach. +func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Options, idleTimeout time.Duration) (*context, bool) { + channels := createChannels(opts) + conn := wsstream.NewConn(map[string]wsstream.ChannelProtocolConfig{ + "": { + Binary: true, + Channels: channels, + }, + preV4BinaryWebsocketProtocol: { + Binary: true, + Channels: channels, + }, + preV4Base64WebsocketProtocol: { + Binary: false, + Channels: channels, + }, + v4BinaryWebsocketProtocol: { + Binary: true, + Channels: channels, + }, + v4Base64WebsocketProtocol: { + Binary: false, + Channels: channels, + }, + }) + conn.SetIdleTimeout(idleTimeout) + negotiatedProtocol, streams, err := conn.Open(httplog.Unlogged(w), req) + if err != nil { + runtime.HandleError(fmt.Errorf("Unable to upgrade websocket connection: %v", err)) + return nil, false + } + + // Send an empty message to the lowest writable channel to notify the client the connection is established + // TODO: make generic to SPDY and WebSockets and do it outside of this method? + switch { + case opts.Stdout: + streams[stdoutChannel].Write([]byte{}) + case opts.Stderr: + streams[stderrChannel].Write([]byte{}) + default: + streams[errorChannel].Write([]byte{}) + } + + ctx := &context{ + conn: conn, + stdinStream: streams[stdinChannel], + stdoutStream: streams[stdoutChannel], + stderrStream: streams[stderrChannel], + tty: opts.TTY, + resizeStream: streams[resizeChannel], + } + + switch negotiatedProtocol { + case v4BinaryWebsocketProtocol, v4Base64WebsocketProtocol: + ctx.writeStatus = v4WriteStatusFunc(streams[errorChannel]) + default: + ctx.writeStatus = v1WriteStatusFunc(streams[errorChannel]) + } + + return ctx, true +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/errors.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/errors.go new file mode 100644 index 000000000..9f16b4eb2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/errors.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package streaming + +import ( + "fmt" + "net/http" + "strconv" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func ErrorStreamingDisabled(method string) error { + return grpc.Errorf(codes.NotFound, fmt.Sprintf("streaming method %s disabled", method)) +} + +// The error returned when the maximum number of in-flight requests is exceeded. 
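The error-channel payload differs by protocol version: v1WriteStatusFunc writes the plain error text, while v4WriteStatusFunc JSON-marshals the metav1.Status so clients can recover structured fields (status, reason, message, details). A rough sketch of the two payloads, assuming the vendored apimachinery error helper apierrors.NewInternalError behaves as in upstream Kubernetes:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

func main() {
	statusErr := apierrors.NewInternalError(errors.New("container not running"))

	// v1/v2/v3 protocols: the raw error text is written to the error stream.
	fmt.Println("v1 error channel payload:", statusErr.Error())

	// v4 protocol: the full metav1.Status is JSON-marshaled onto the error stream.
	bs, err := json.Marshal(statusErr.Status())
	if err != nil {
		panic(err)
	}
	fmt.Println("v4 error channel payload:", string(bs))
}
```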
+func ErrorTooManyInFlight() error { + return grpc.Errorf(codes.ResourceExhausted, "maximum number of in-flight requests exceeded") +} + +// Translates a CRI streaming error into an appropriate HTTP response. +func WriteError(err error, w http.ResponseWriter) error { + var status int + switch grpc.Code(err) { + case codes.NotFound: + status = http.StatusNotFound + case codes.ResourceExhausted: + // We only expect to hit this if there is a DoS, so we just wait the full TTL. + // If this is ever hit in steady-state operations, consider increasing the MaxInFlight requests, + // or plumbing through the time to next expiration. + w.Header().Set("Retry-After", strconv.Itoa(int(CacheTTL.Seconds()))) + status = http.StatusTooManyRequests + default: + status = http.StatusInternalServerError + } + w.WriteHeader(status) + _, writeErr := w.Write([]byte(err.Error())) + return writeErr +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/request_cache.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/request_cache.go new file mode 100644 index 000000000..f68f640be --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/request_cache.go @@ -0,0 +1,146 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package streaming + +import ( + "container/list" + "crypto/rand" + "encoding/base64" + "fmt" + "math" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" +) + +var ( + // Timeout after which tokens become invalid. + CacheTTL = 1 * time.Minute + // The maximum number of in-flight requests to allow. + MaxInFlight = 1000 + // Length of the random base64 encoded token identifying the request. + TokenLen = 8 +) + +// requestCache caches streaming (exec/attach/port-forward) requests and generates a single-use +// random token for their retrieval. The requestCache is used for building streaming URLs without +// the need to encode every request parameter in the URL. +type requestCache struct { + // clock is used to obtain the current time + clock clock.Clock + + // tokens maps the generate token to the request for fast retrieval. + tokens map[string]*list.Element + // ll maintains an age-ordered request list for faster garbage collection of expired requests. + ll *list.List + + lock sync.Mutex +} + +// Type representing an *ExecRequest, *AttachRequest, or *PortForwardRequest. +type request interface{} + +type cacheEntry struct { + token string + req request + expireTime time.Time +} + +func newRequestCache() *requestCache { + return &requestCache{ + clock: clock.RealClock{}, + ll: list.New(), + tokens: make(map[string]*list.Element), + } +} + +// Insert the given request into the cache and returns the token used for fetching it out. +func (c *requestCache) Insert(req request) (token string, err error) { + c.lock.Lock() + defer c.lock.Unlock() + + // Remove expired entries. + c.gc() + // If the cache is full, reject the request. 
+ if c.ll.Len() == MaxInFlight { + return "", ErrorTooManyInFlight() + } + token, err = c.uniqueToken() + if err != nil { + return "", err + } + ele := c.ll.PushFront(&cacheEntry{token, req, c.clock.Now().Add(CacheTTL)}) + + c.tokens[token] = ele + return token, nil +} + +// Consume the token (remove it from the cache) and return the cached request, if found. +func (c *requestCache) Consume(token string) (req request, found bool) { + c.lock.Lock() + defer c.lock.Unlock() + ele, ok := c.tokens[token] + if !ok { + return nil, false + } + c.ll.Remove(ele) + delete(c.tokens, token) + + entry := ele.Value.(*cacheEntry) + if c.clock.Now().After(entry.expireTime) { + // Entry already expired. + return nil, false + } + return entry.req, true +} + +// uniqueToken generates a random URL-safe token and ensures uniqueness. +func (c *requestCache) uniqueToken() (string, error) { + const maxTries = 10 + // Number of bytes to be TokenLen when base64 encoded. + tokenSize := math.Ceil(float64(TokenLen) * 6 / 8) + rawToken := make([]byte, int(tokenSize)) + for i := 0; i < maxTries; i++ { + if _, err := rand.Read(rawToken); err != nil { + return "", err + } + encoded := base64.RawURLEncoding.EncodeToString(rawToken) + token := encoded[:TokenLen] + // If it's unique, return it. Otherwise retry. + if _, exists := c.tokens[encoded]; !exists { + return token, nil + } + } + return "", fmt.Errorf("failed to generate unique token") +} + +// Must be write-locked prior to calling. +func (c *requestCache) gc() { + now := c.clock.Now() + for c.ll.Len() > 0 { + oldest := c.ll.Back() + entry := oldest.Value.(*cacheEntry) + if !now.After(entry.expireTime) { + return + } + + // Oldest value is expired; remove it. + c.ll.Remove(oldest) + delete(c.tokens, entry.token) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/server.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/server.go new file mode 100644 index 000000000..875e44462 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/server.go @@ -0,0 +1,344 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package streaming + +import ( + "crypto/tls" + "errors" + "io" + "net/http" + "net/url" + "path" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + + restful "github.com/emicklei/go-restful" + + "k8s.io/apimachinery/pkg/types" + remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/client-go/tools/remotecommand" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" + "k8s.io/kubernetes/pkg/kubelet/server/portforward" + remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" +) + +// The library interface to serve the stream requests. +type Server interface { + http.Handler + + // Get the serving URL for the requests. + // Requests must not be nil. Responses may be nil iff an error is returned. 
+ GetExec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) + GetAttach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) + GetPortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) + + // Start the server. + // addr is the address to serve on (address:port) stayUp indicates whether the server should + // listen until Stop() is called, or automatically stop after all expected connections are + // closed. Calling Get{Exec,Attach,PortForward} increments the expected connection count. + // Function does not return until the server is stopped. + Start(stayUp bool) error + // Stop the server, and terminate any open connections. + Stop() error +} + +// The interface to execute the commands and provide the streams. +type Runtime interface { + Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error + Attach(containerID string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error + PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error +} + +// Config defines the options used for running the stream server. +type Config struct { + // The host:port address the server will listen on. + Addr string + // The optional base URL for constructing streaming URLs. If empty, the baseURL will be + // constructed from the serve address. + BaseURL *url.URL + + // How long to leave idle connections open for. + StreamIdleTimeout time.Duration + // How long to wait for clients to create streams. Only used for SPDY streaming. + StreamCreationTimeout time.Duration + + // The streaming protocols the server supports (understands and permits). See + // k8s.io/kubernetes/pkg/kubelet/server/remotecommand/constants.go for available protocols. + // Only used for SPDY streaming. + SupportedRemoteCommandProtocols []string + + // The streaming protocols the server supports (understands and permits). See + // k8s.io/kubernetes/pkg/kubelet/server/portforward/constants.go for available protocols. + // Only used for SPDY streaming. + SupportedPortForwardProtocols []string + + // The config for serving over TLS. If nil, TLS will not be used. + TLSConfig *tls.Config +} + +// DefaultConfig provides default values for server Config. The DefaultConfig is partial, so +// some fields like Addr must still be provided. +var DefaultConfig = Config{ + StreamIdleTimeout: 4 * time.Hour, + StreamCreationTimeout: remotecommandconsts.DefaultStreamCreationTimeout, + SupportedRemoteCommandProtocols: remotecommandconsts.SupportedStreamingProtocols, + SupportedPortForwardProtocols: portforward.SupportedProtocols, +} + +// TODO(timstclair): Add auth(n/z) interface & handling. +func NewServer(config Config, runtime Runtime) (Server, error) { + s := &server{ + config: config, + runtime: &criAdapter{runtime}, + cache: newRequestCache(), + } + + if s.config.BaseURL == nil { + s.config.BaseURL = &url.URL{ + Scheme: "http", + Host: s.config.Addr, + } + if s.config.TLSConfig != nil { + s.config.BaseURL.Scheme = "https" + } + } + + ws := &restful.WebService{} + endpoints := []struct { + path string + handler restful.RouteFunction + }{ + {"/exec/{token}", s.serveExec}, + {"/attach/{token}", s.serveAttach}, + {"/portforward/{token}", s.servePortForward}, + } + // If serving relative to a base path, set that here. + pathPrefix := path.Dir(s.config.BaseURL.Path) + for _, e := range endpoints { + for _, method := range []string{"GET", "POST"} { + ws.Route(ws. 
+ Method(method). + Path(path.Join(pathPrefix, e.path)). + To(e.handler)) + } + } + handler := restful.NewContainer() + handler.Add(ws) + s.handler = handler + + return s, nil +} + +type server struct { + config Config + runtime *criAdapter + handler http.Handler + cache *requestCache +} + +func (s *server) GetExec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { + if req.ContainerId == "" { + return nil, grpc.Errorf(codes.InvalidArgument, "missing required container_id") + } + token, err := s.cache.Insert(req) + if err != nil { + return nil, err + } + return &runtimeapi.ExecResponse{ + Url: s.buildURL("exec", token), + }, nil +} + +func (s *server) GetAttach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { + if req.ContainerId == "" { + return nil, grpc.Errorf(codes.InvalidArgument, "missing required container_id") + } + token, err := s.cache.Insert(req) + if err != nil { + return nil, err + } + return &runtimeapi.AttachResponse{ + Url: s.buildURL("attach", token), + }, nil +} + +func (s *server) GetPortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { + if req.PodSandboxId == "" { + return nil, grpc.Errorf(codes.InvalidArgument, "missing required pod_sandbox_id") + } + token, err := s.cache.Insert(req) + if err != nil { + return nil, err + } + return &runtimeapi.PortForwardResponse{ + Url: s.buildURL("portforward", token), + }, nil +} + +func (s *server) Start(stayUp bool) error { + if !stayUp { + // TODO(timstclair): Implement this. + return errors.New("stayUp=false is not yet implemented") + } + + server := &http.Server{ + Addr: s.config.Addr, + Handler: s.handler, + TLSConfig: s.config.TLSConfig, + } + if s.config.TLSConfig != nil { + return server.ListenAndServeTLS("", "") // Use certs from TLSConfig. + } else { + return server.ListenAndServe() + } +} + +func (s *server) Stop() error { + // TODO(timstclair): Implement this. 
+ return errors.New("not yet implemented") +} + +func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.handler.ServeHTTP(w, r) +} + +func (s *server) buildURL(method, token string) string { + return s.config.BaseURL.ResolveReference(&url.URL{ + Path: path.Join(method, token), + }).String() +} + +func (s *server) serveExec(req *restful.Request, resp *restful.Response) { + token := req.PathParameter("token") + cachedRequest, ok := s.cache.Consume(token) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + exec, ok := cachedRequest.(*runtimeapi.ExecRequest) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + + streamOpts := &remotecommandserver.Options{ + Stdin: exec.Stdin, + Stdout: true, + Stderr: !exec.Tty, + TTY: exec.Tty, + } + + remotecommandserver.ServeExec( + resp.ResponseWriter, + req.Request, + s.runtime, + "", // unused: podName + "", // unusued: podUID + exec.ContainerId, + exec.Cmd, + streamOpts, + s.config.StreamIdleTimeout, + s.config.StreamCreationTimeout, + s.config.SupportedRemoteCommandProtocols) +} + +func (s *server) serveAttach(req *restful.Request, resp *restful.Response) { + token := req.PathParameter("token") + cachedRequest, ok := s.cache.Consume(token) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + attach, ok := cachedRequest.(*runtimeapi.AttachRequest) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + + streamOpts := &remotecommandserver.Options{ + Stdin: attach.Stdin, + Stdout: true, + Stderr: !attach.Tty, + TTY: attach.Tty, + } + remotecommandserver.ServeAttach( + resp.ResponseWriter, + req.Request, + s.runtime, + "", // unused: podName + "", // unusued: podUID + attach.ContainerId, + streamOpts, + s.config.StreamIdleTimeout, + s.config.StreamCreationTimeout, + s.config.SupportedRemoteCommandProtocols) +} + +func (s *server) servePortForward(req *restful.Request, resp *restful.Response) { + token := req.PathParameter("token") + cachedRequest, ok := s.cache.Consume(token) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + pf, ok := cachedRequest.(*runtimeapi.PortForwardRequest) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + + portForwardOptions, err := portforward.BuildV4Options(pf.Port) + if err != nil { + resp.WriteError(http.StatusBadRequest, err) + return + } + + portforward.ServePortForward( + resp.ResponseWriter, + req.Request, + s.runtime, + pf.PodSandboxId, + "", // unused: podUID + portForwardOptions, + s.config.StreamIdleTimeout, + s.config.StreamCreationTimeout, + s.config.SupportedPortForwardProtocols) +} + +// criAdapter wraps the Runtime functions to conform to the remotecommand interfaces. +// The adapter binds the container ID to the container name argument, and the pod sandbox ID to the pod name. 
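Putting the pieces together, a CRI runtime plugs its exec/attach/port-forward implementation into this server through the Runtime interface and answers CRI streaming RPCs with the single-use tokenized URLs minted by GetExec/GetAttach/GetPortForward; serveExec later consumes the token on the first request to that URL. A minimal sketch, assuming the vendored packages above are importable and using a hypothetical listen address, container ID, and no-op runtime:

```go
package main

import (
	"errors"
	"fmt"
	"io"

	"k8s.io/client-go/tools/remotecommand"
	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
	"k8s.io/kubernetes/pkg/kubelet/server/streaming"
)

// fakeRuntime is a hypothetical stand-in for a real CRI runtime; every method just errors out.
type fakeRuntime struct{}

func (fakeRuntime) Exec(containerID string, cmd []string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
	return errors.New("not implemented")
}
func (fakeRuntime) Attach(containerID string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
	return errors.New("not implemented")
}
func (fakeRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
	return errors.New("not implemented")
}

func main() {
	config := streaming.DefaultConfig
	config.Addr = "127.0.0.1:10010" // hypothetical listen address

	s, err := streaming.NewServer(config, fakeRuntime{})
	if err != nil {
		panic(err)
	}

	// GetExec caches the request and hands back a single-use tokenized URL.
	resp, err := s.GetExec(&runtimeapi.ExecRequest{
		ContainerId: "abc123", // hypothetical container ID
		Cmd:         []string{"/bin/sh"},
		Stdin:       true,
		Tty:         true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("exec URL:", resp.Url)

	// Serving the URL (normally: s.Start(true) in a goroutine) is omitted in this sketch.
}
```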
+type criAdapter struct { + Runtime +} + +var _ remotecommandserver.Executor = &criAdapter{} +var _ remotecommandserver.Attacher = &criAdapter{} +var _ portforward.PortForwarder = &criAdapter{} + +func (a *criAdapter) ExecInContainer(podName string, podUID types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { + return a.Exec(container, cmd, in, out, err, tty, resize) +} + +func (a *criAdapter) AttachContainer(podName string, podUID types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { + return a.Attach(container, in, out, err, tty, resize) +} + +func (a *criAdapter) PortForward(podName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error { + return a.Runtime.PortForward(podName, port, stream) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/exec/doc.go b/vendor/k8s.io/kubernetes/pkg/util/exec/doc.go new file mode 100644 index 000000000..de7301c8d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/exec/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package exec provides an injectable interface and implementations for running commands. +package exec // import "k8s.io/kubernetes/pkg/util/exec" diff --git a/vendor/k8s.io/kubernetes/pkg/util/exec/exec.go b/vendor/k8s.io/kubernetes/pkg/util/exec/exec.go new file mode 100644 index 000000000..f43bfa7a1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/exec/exec.go @@ -0,0 +1,200 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "io" + osexec "os/exec" + "syscall" + "time" +) + +// ErrExecutableNotFound is returned if the executable is not found. +var ErrExecutableNotFound = osexec.ErrNotFound + +// Interface is an interface that presents a subset of the os/exec API. Use this +// when you want to inject fakeable/mockable exec behavior. +type Interface interface { + // Command returns a Cmd instance which can be used to run a single command. + // This follows the pattern of package os/exec. + Command(cmd string, args ...string) Cmd + + // LookPath wraps os/exec.LookPath + LookPath(file string) (string, error) +} + +// Cmd is an interface that presents an API that is very similar to Cmd from os/exec. +// As more functionality is needed, this can grow. Since Cmd is a struct, we will have +// to replace fields with get/set method pairs. 
+type Cmd interface { + // Run runs the command to the completion. + Run() error + // CombinedOutput runs the command and returns its combined standard output + // and standard error. This follows the pattern of package os/exec. + CombinedOutput() ([]byte, error) + // Output runs the command and returns standard output, but not standard err + Output() ([]byte, error) + SetDir(dir string) + SetStdin(in io.Reader) + SetStdout(out io.Writer) + SetStderr(out io.Writer) + // Stops the command by sending SIGTERM. It is not guaranteed the + // process will stop before this function returns. If the process is not + // responding, an internal timer function will send a SIGKILL to force + // terminate after 10 seconds. + Stop() +} + +// ExitError is an interface that presents an API similar to os.ProcessState, which is +// what ExitError from os/exec is. This is designed to make testing a bit easier and +// probably loses some of the cross-platform properties of the underlying library. +type ExitError interface { + String() string + Error() string + Exited() bool + ExitStatus() int +} + +// Implements Interface in terms of really exec()ing. +type executor struct{} + +// New returns a new Interface which will os/exec to run commands. +func New() Interface { + return &executor{} +} + +// Command is part of the Interface interface. +func (executor *executor) Command(cmd string, args ...string) Cmd { + return (*cmdWrapper)(osexec.Command(cmd, args...)) +} + +// LookPath is part of the Interface interface +func (executor *executor) LookPath(file string) (string, error) { + return osexec.LookPath(file) +} + +// Wraps exec.Cmd so we can capture errors. +type cmdWrapper osexec.Cmd + +func (cmd *cmdWrapper) SetDir(dir string) { + cmd.Dir = dir +} + +func (cmd *cmdWrapper) SetStdin(in io.Reader) { + cmd.Stdin = in +} + +func (cmd *cmdWrapper) SetStdout(out io.Writer) { + cmd.Stdout = out +} + +func (cmd *cmdWrapper) SetStderr(out io.Writer) { + cmd.Stderr = out +} + +// Run is part of the Cmd interface. +func (cmd *cmdWrapper) Run() error { + return (*osexec.Cmd)(cmd).Run() +} + +// CombinedOutput is part of the Cmd interface. +func (cmd *cmdWrapper) CombinedOutput() ([]byte, error) { + out, err := (*osexec.Cmd)(cmd).CombinedOutput() + if err != nil { + return out, handleError(err) + } + return out, nil +} + +func (cmd *cmdWrapper) Output() ([]byte, error) { + out, err := (*osexec.Cmd)(cmd).Output() + if err != nil { + return out, handleError(err) + } + return out, nil +} + +// Stop is part of the Cmd interface. +func (cmd *cmdWrapper) Stop() { + c := (*osexec.Cmd)(cmd) + if c.ProcessState.Exited() { + return + } + c.Process.Signal(syscall.SIGTERM) + time.AfterFunc(10*time.Second, func() { + if c.ProcessState.Exited() { + return + } + c.Process.Signal(syscall.SIGKILL) + }) +} + +func handleError(err error) error { + if ee, ok := err.(*osexec.ExitError); ok { + // Force a compile fail if exitErrorWrapper can't convert to ExitError. + var x ExitError = &ExitErrorWrapper{ee} + return x + } + if ee, ok := err.(*osexec.Error); ok { + if ee.Err == osexec.ErrNotFound { + return ErrExecutableNotFound + } + } + return err +} + +// ExitErrorWrapper is an implementation of ExitError in terms of os/exec ExitError. +// Note: standard exec.ExitError is type *os.ProcessState, which already implements Exited(). +type ExitErrorWrapper struct { + *osexec.ExitError +} + +var _ ExitError = ExitErrorWrapper{} + +// ExitStatus is part of the ExitError interface. 
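In practice callers obtain an Interface from New(), run commands through it, and type-assert errors against ExitError to read exit codes without touching os/exec directly. A small sketch using only the API defined in this file:

```go
package main

import (
	"fmt"

	utilexec "k8s.io/kubernetes/pkg/util/exec"
)

func main() {
	runner := utilexec.New()

	out, err := runner.Command("ls", "/no/such/dir").CombinedOutput()
	if err != nil {
		// handleError wraps *os/exec.ExitError into this package's ExitError
		// interface, so the exit status can be inspected portably.
		if exitErr, ok := err.(utilexec.ExitError); ok {
			fmt.Printf("command exited with status %d: %s", exitErr.ExitStatus(), string(out))
			return
		}
		if err == utilexec.ErrExecutableNotFound {
			fmt.Println("executable not found on PATH")
			return
		}
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Print(string(out))
}
```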
+func (eew ExitErrorWrapper) ExitStatus() int { + ws, ok := eew.Sys().(syscall.WaitStatus) + if !ok { + panic("can't call ExitStatus() on a non-WaitStatus exitErrorWrapper") + } + return ws.ExitStatus() +} + +// CodeExitError is an implementation of ExitError consisting of an error object +// and an exit code (the upper bits of os.exec.ExitStatus). +type CodeExitError struct { + Err error + Code int +} + +var _ ExitError = CodeExitError{} + +func (e CodeExitError) Error() string { + return e.Err.Error() +} + +func (e CodeExitError) String() string { + return e.Err.Error() +} + +func (e CodeExitError) Exited() bool { + return true +} + +func (e CodeExitError) ExitStatus() int { + return e.Code +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/exec/fake_exec.go b/vendor/k8s.io/kubernetes/pkg/util/exec/fake_exec.go new file mode 100644 index 000000000..c7fcd6cec --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/exec/fake_exec.go @@ -0,0 +1,145 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "fmt" + "io" +) + +// A simple scripted Interface type. +type FakeExec struct { + CommandScript []FakeCommandAction + CommandCalls int + LookPathFunc func(string) (string, error) +} + +type FakeCommandAction func(cmd string, args ...string) Cmd + +func (fake *FakeExec) Command(cmd string, args ...string) Cmd { + if fake.CommandCalls > len(fake.CommandScript)-1 { + panic(fmt.Sprintf("ran out of Command() actions. Could not handle command [%d]: %s args: %v", fake.CommandCalls, cmd, args)) + } + i := fake.CommandCalls + fake.CommandCalls++ + return fake.CommandScript[i](cmd, args...) +} + +func (fake *FakeExec) LookPath(file string) (string, error) { + return fake.LookPathFunc(file) +} + +// A simple scripted Cmd type. +type FakeCmd struct { + Argv []string + CombinedOutputScript []FakeCombinedOutputAction + CombinedOutputCalls int + CombinedOutputLog [][]string + RunScript []FakeRunAction + RunCalls int + RunLog [][]string + Dirs []string + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +func InitFakeCmd(fake *FakeCmd, cmd string, args ...string) Cmd { + fake.Argv = append([]string{cmd}, args...) 
+ return fake +} + +type FakeCombinedOutputAction func() ([]byte, error) +type FakeRunAction func() ([]byte, []byte, error) + +func (fake *FakeCmd) SetDir(dir string) { + fake.Dirs = append(fake.Dirs, dir) +} + +func (fake *FakeCmd) SetStdin(in io.Reader) { + fake.Stdin = in +} + +func (fake *FakeCmd) SetStdout(out io.Writer) { + fake.Stdout = out +} + +func (fake *FakeCmd) SetStderr(out io.Writer) { + fake.Stderr = out +} + +func (fake *FakeCmd) Run() error { + if fake.RunCalls > len(fake.RunScript)-1 { + panic("ran out of Run() actions") + } + if fake.RunLog == nil { + fake.RunLog = [][]string{} + } + i := fake.RunCalls + fake.RunLog = append(fake.RunLog, append([]string{}, fake.Argv...)) + fake.RunCalls++ + stdout, stderr, err := fake.RunScript[i]() + if stdout != nil { + fake.Stdout.Write(stdout) + } + if stderr != nil { + fake.Stderr.Write(stderr) + } + return err +} + +func (fake *FakeCmd) CombinedOutput() ([]byte, error) { + if fake.CombinedOutputCalls > len(fake.CombinedOutputScript)-1 { + panic("ran out of CombinedOutput() actions") + } + if fake.CombinedOutputLog == nil { + fake.CombinedOutputLog = [][]string{} + } + i := fake.CombinedOutputCalls + fake.CombinedOutputLog = append(fake.CombinedOutputLog, append([]string{}, fake.Argv...)) + fake.CombinedOutputCalls++ + return fake.CombinedOutputScript[i]() +} + +func (fake *FakeCmd) Output() ([]byte, error) { + return nil, fmt.Errorf("unimplemented") +} + +func (fake *FakeCmd) Stop() { + // no-op +} + +// A simple fake ExitError type. +type FakeExitError struct { + Status int +} + +func (fake *FakeExitError) String() string { + return fmt.Sprintf("exit %d", fake.Status) +} + +func (fake *FakeExitError) Error() string { + return fake.String() +} + +func (fake *FakeExitError) Exited() bool { + return true +} + +func (fake *FakeExitError) ExitStatus() int { + return fake.Status +}
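Tests inject FakeExec in place of the real executor and script each expected command; InitFakeCmd records the argv so assertions can check exactly what was run. A minimal usage sketch built only from the types in this file:

```go
package main

import (
	"fmt"

	utilexec "k8s.io/kubernetes/pkg/util/exec"
)

func main() {
	// Script a single CombinedOutput call on the fake command.
	fcmd := utilexec.FakeCmd{
		CombinedOutputScript: []utilexec.FakeCombinedOutputAction{
			func() ([]byte, error) { return []byte("fake output\n"), nil },
		},
	}
	// Script a single Command() call on the fake executor, returning the fake command.
	fexec := utilexec.FakeExec{
		CommandScript: []utilexec.FakeCommandAction{
			func(cmd string, args ...string) utilexec.Cmd {
				return utilexec.InitFakeCmd(&fcmd, cmd, args...)
			},
		},
	}

	out, err := fexec.Command("iptables", "--version").CombinedOutput()
	fmt.Printf("out=%q err=%v argv=%v\n", out, err, fcmd.CombinedOutputLog[0])
}
```

Running out of scripted actions panics, which keeps a test honest about how many commands it expects the code under test to issue.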